diff --git "a/emnlp/emnlp2022.json" "b/emnlp/emnlp2022.json" new file mode 100644--- /dev/null +++ "b/emnlp/emnlp2022.json" @@ -0,0 +1,44691 @@ +[ + { + "id": "2022.findings-emnlp.466", + "title": "A Benchmark and Dataset for Post-OCR text correction in Sanskrit", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Sanskrit is a classical language with about 30 million extant manuscripts fit for digitisation, available in written, printed or scanned-image forms. However, it is still considered to be a low-resource language when it comes to available digital resources. In this work, we release a post-OCR text correction dataset containing around 218,000 sentences, with 1.5 million words, from 30 different books. Texts in Sanskrit are known to be diverse in terms of their linguistic and stylistic usage since Sanskrit was the \u2018lingua francua\u2019 for discourse in the Indian subcontinent for about 3 millennia. Keeping this in mind, we release a multi-domain dataset, from areas as diverse as astronomy, medicine and mathematics, with some of them as old as 18 centuries. Further, we release multiple strong baselines as benchmarks for the task, based on pre-trained Seq2Seq language models. We find that our best-performing model, consisting of byte level tokenization in conjunction with phonetic encoding (Byt5+SLP1), yields a 23% point increase over the OCR output in terms of word and character error rates. Moreover, we perform extensive experiments in evaluating these models on their performance and analyse common causes of mispredictions both at the graphemic and lexical levels. 
Our code and dataset is publicly available at https://github.com/ayushbits/pe-ocr-sanskrit.", + "author": "Ayush Maheshwari; Nikhil Singh; Amrith Krishna; Ganesh Ramakrishnan", + "authorids": "/a/ayush-maheshwari/; /n/nikhil-singh/; /a/amrith-krishna/; /g/ganesh-ramakrishnan/", + "bibtex": "@inproceedings{maheshwari-etal-2022-benchmark,\n title = \"A Benchmark and Dataset for Post-{OCR} text correction in {S}anskrit\",\n author = \"Maheshwari, Ayush and\n Singh, Nikhil and\n Krishna, Amrith and\n Ramakrishnan, Ganesh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.466/\",\n doi = \"10.18653/v1/2022.findings-emnlp.466\",\n pages = \"6258--6265\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.466.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.466/", + "pdf_size": 912414, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5440618254045618729&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Indian Institute of Technology Bombay; Indian Institute of Technology Bombay+Uniphore; Uniphore; Indian Institute of Technology Bombay", + "aff_domain": "cse.iitb.ac.in;gmail.com;gmail.com;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;gmail.com;gmail.com;cse.iitb.ac.in", + "github": "https://github.com/ayushbits/pe-ocr-sanskrit", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;1;0", + "aff_unique_norm": "Indian Institute of Technology Bombay;Uniphore Software Systems", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitb.ac.in;https://www.uniphore.com", + "aff_unique_abbr": "IIT Bombay;Uniphore", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Bombay;", + 
"aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.126", + "title": "A Comprehensive Comparison of Neural Networks as Cognitive Models of Inflection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Neural networks have long been at the center of a debate around the cognitive mechanism by which humans process inflectional morphology. This debate has gravitated into NLP by way of the question: Are neural networks a feasible account for human behavior in morphological inflection?We address that question by measuring the correlation between human judgments and neural network probabilities for unknown word inflections. We test a larger range of architectures than previously studied on two important tasks for the cognitive processing debate: English past tense, and German number inflection. We find evidence that the Transformer may be a better account of human behavior than LSTMs on these datasets, and that LSTM features known to increase inflection accuracy do not always result in more human-like behavior.", + "author": "Adam Wiemerslage; Shiran Dudy; Katharina Kann", + "authorids": "/a/adam-wiemerslage/; /s/shiran-dudy/; /k/katharina-von-der-wense/", + "bibtex": "@inproceedings{wiemerslage-etal-2022-comprehensive,\n title = \"A Comprehensive Comparison of Neural Networks as Cognitive Models of Inflection\",\n author = \"Wiemerslage, Adam and\n Dudy, Shiran and\n Kann, Katharina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.126/\",\n doi = \"10.18653/v1/2022.emnlp-main.126\",\n pages = \"1933--1945\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.126.pdf", 
+ "site": "https://aclanthology.org/2022.emnlp-main.126/", + "pdf_size": 1024633, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6777339332532176069&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "University of Colorado Boulder; University of Colorado Boulder; University of Colorado Boulder", + "aff_domain": "colorado.edu;colorado.edu;colorado.edu", + "email": "colorado.edu;colorado.edu;colorado.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Colorado", + "aff_unique_dep": "", + "aff_unique_url": "https://www.colorado.edu", + "aff_unique_abbr": "CU Boulder", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Boulder", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.61", + "title": "A Comprehensive Evaluation of Biomedical Entity-centric Search", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Biomedical information retrieval has often been studied as a task of detecting whether a system correctly detects entity spans and links these entities to concepts from a given terminology. Most academic research has focused on evaluation of named entity recognition (NER) and entity linking (EL) models which are key components to recognizing diseases and genes in PubMed abstracts. In this work, we perform a fine-grained evaluation intended to understand the efficiency of state-of-the-art BERT-based information extraction (IE) architecture as a biomedical search engine. We present a novel manually annotated dataset of abstracts for disease and gene search. The dataset contains 23K query-abstract pairs, where 152 queries are selected from logs of our target discovery platform and PubMed abstracts annotated with relevance judgments. Specifically, the query list also includes a subset of concepts with at least one ambiguous concept name. 
As a baseline, we use off-she-shelf Elasticsearch with BM25. Our experiments on NER, EL, and retrieval in a zero-shot setup show the neural IE architecture shows superior performance for both disease and gene concept queries.", + "author": "Elena Tutubalina; Zulfat Miftahutdinov; Vladimir Muravlev; Anastasia Shneyderman", + "authorids": "/e/elena-tutubalina/; /z/zulfat-miftahutdinov/; /v/vladimir-muravlev/; /a/anastasia-shneyderman/", + "bibtex": "@inproceedings{tutubalina-etal-2022-comprehensive,\n title = \"A Comprehensive Evaluation of Biomedical Entity-centric Search\",\n author = \"Tutubalina, Elena and\n Miftahutdinov, Zulfat and\n Muravlev, Vladimir and\n Shneyderman, Anastasia\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.61/\",\n doi = \"10.18653/v1/2022.emnlp-industry.61\",\n pages = \"596--605\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.61.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.61/", + "pdf_size": 458726, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7817667932712355628&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Insilico Medicine Hong Kong; Insilico Medicine Hong Kong; Insilico Medicine Hong Kong; Insilico Medicine Hong Kong", + "aff_domain": "insilicomedicine.com;insilicomedicine.com;insilicomedicine.com;insilicomedicine.com", + "email": "insilicomedicine.com;insilicomedicine.com;insilicomedicine.com;insilicomedicine.com", + "github": "", + "project": "https://pandaomics.com/", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Insilico Medicine", + "aff_unique_dep": "", + "aff_unique_url": "https://insilico.com", + 
"aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Hong Kong", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.157", + "title": "A Critical Reflection and Forward Perspective on Empathy and Natural Language Processing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We review the state of research on empathy in natural language processing and identify the following issues: (1) empathy definitions are absent or abstract, which (2) leads to low construct validity and reproducibility. Moreover, (3) emotional empathy is overemphasized, skewing our focus to a narrow subset of simplified tasks. We believe these issues hinder research progress and argue that current directions will benefit from a clear conceptualization that includes operationalizing cognitive empathy components. Our main objectives are to provide insight and guidance on empathy conceptualization for NLP research objectives and to encourage researchers to pursue the overlooked opportunities in this area, highly relevant, e.g., for clinical and educational sectors.", + "author": "Allison Lahnala; Charles Welch; David Jurgens; Lucie Flek", + "authorids": "/a/allison-lahnala/; /c/charles-welch/; /d/david-jurgens/; /l/lucie-flek/", + "bibtex": "@inproceedings{lahnala-etal-2022-critical,\n title = \"A Critical Reflection and Forward Perspective on Empathy and Natural Language Processing\",\n author = \"Lahnala, Allison and\n Welch, Charles and\n Jurgens, David and\n Flek, Lucie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.157/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.157\",\n pages = \"2139--2158\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.157.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.157/", + "pdf_size": 250466, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14525581467248511549&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI); Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI); School of Information, University of Michigan; Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI)", + "aff_domain": "uni-marburg.de;uni-marburg.de;umich.edu;uni-marburg.de", + "email": "uni-marburg.de;uni-marburg.de;umich.edu;uni-marburg.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;0+1", + "aff_unique_norm": "University of Marburg;Hessian Center for Artificial Intelligence;University of Michigan", + "aff_unique_dep": "Department of Mathematics and Computer Science;Artificial Intelligence;School of Information", + "aff_unique_url": "https://www.uni-marburg.de;https://hessian.ai;https://www.umich.edu", + "aff_unique_abbr": "UM;Hessian.AI;UM", + "aff_campus_unique_index": ";;1;", + "aff_campus_unique": ";Ann Arbor", + "aff_country_unique_index": "0+0;0+0;1;0+0", + "aff_country_unique": "Germany;United States" + }, + { + "id": "2022.emnlp-main.688", + "title": "A Dataset for Hyper-Relational Extraction and a Cube-Filling Approach", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relation extraction has the potential for 
large-scale knowledge graph construction, but current methods do not consider the qualifier attributes for each relation triplet, such as time, quantity or location. The qualifiers form hyper-relational facts which better capture the rich and complex knowledge graph structure. For example, the relation triplet (Leonard Parker, Educated At, Harvard University) can be factually enriched by including the qualifier (End Time, 1967). Hence, we propose the task of hyper-relational extraction to extract more specific and complete facts from text. To support the task, we construct HyperRED, a large-scale and general-purpose dataset. Existing models cannot perform hyper-relational extraction as it requires a model to consider the interaction between three entities. Hence, we propose CubeRE, a cube-filling model inspired by table-filling approaches and explicitly considers the interaction between relation triplets and qualifiers. To improve model scalability and reduce negative class imbalance, we further propose a cube-pruning method. Our experiments show that CubeRE outperforms strong baselines and reveal possible directions for future research. 
Our code and data are available at github.com/declare-lab/HyperRED.", + "author": "Yew Ken Chia; Lidong Bing; Sharifah Mahani Aljunied; Luo Si; Soujanya Poria", + "authorids": "/y/yew-ken-chia/; /l/lidong-bing/; /s/sharifah-mahani-aljunied/; /l/luo-si/; /s/soujanya-poria/", + "bibtex": "@inproceedings{chia-etal-2022-dataset,\n title = \"A Dataset for Hyper-Relational Extraction and a Cube-Filling Approach\",\n author = \"Chia, Yew Ken and\n Bing, Lidong and\n Aljunied, Sharifah Mahani and\n Si, Luo and\n Poria, Soujanya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.688/\",\n doi = \"10.18653/v1/2022.emnlp-main.688\",\n pages = \"10114--10133\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.688.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.688/", + "pdf_size": 425500, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8817449330411823292&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "github.com/declare-lab/HyperRED", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.67", + "title": "A Distributional Lens for Multi-Aspect Controllable Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-aspect controllable text generation is a more challenging and practical task than single-aspect control. Existing methods achieve complex multi-aspect control by fusing multiple controllers learned from single-aspect, but suffer from attribute degeneration caused by the mutual interference of these controllers. 
To address this, we provide observations on attribute fusion from a distributional perspective and propose to directly search for the intersection areas of multiple attribute distributions as their combination for generation. Our method first estimates the attribute space with an autoencoder structure. Afterward, we iteratively approach the intersections by jointly minimizing distances to points representing different attributes. Finally, we map them to attribute-relevant sentences with a prefix-tuning-based decoder. Experiments on the three-aspect control task, including sentiment, topic, and detoxification aspects, reveal that our method outperforms several strong baselines on attribute relevance and text quality and achieves the SOTA. Further analysis also supplies some explanatory support for the effectiveness of our approach.", + "author": "Yuxuan Gu; Xiaocheng Feng; Sicheng Ma; Lingyuan Zhang; Heng Gong; Bing Qin", + "authorids": "/y/yuxuan-gu/; /x/xiaocheng-feng/; /s/sicheng-ma/; /l/lingyuan-zhang/; /h/heng-gong/; /b/bing-qin/", + "bibtex": "@inproceedings{gu-etal-2022-distributional,\n title = \"A Distributional Lens for Multi-Aspect Controllable Text Generation\",\n author = \"Gu, Yuxuan and\n Feng, Xiaocheng and\n Ma, Sicheng and\n Zhang, Lingyuan and\n Gong, Heng and\n Qin, Bing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.67/\",\n doi = \"10.18653/v1/2022.emnlp-main.67\",\n pages = \"1023--1043\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.67.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.67/", + "pdf_size": 6065355, + "gs_citation": 34, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=8262166770190353863&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Harbin Institute of Technology; Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology+Peng Cheng Laboratory", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "https://github.com/HappyGu0524/MultiControl", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hit.edu.cn/;http://www.pcl.ac.cn", + "aff_unique_abbr": "HIT;PCL", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.819", + "title": "A Federated Approach to Predicting Emojis in Hindi Tweets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The use of emojis affords a visual modality to, often private, textual communication.The task of predicting emojis however provides a challenge for machine learning as emoji use tends to cluster into the frequently used and the rarely used emojis.Much of the machine learning research on emoji use has focused on high resource languages and has conceptualised the task of predicting emojis around traditional server-side machine learning approaches.However, traditional machine learning approaches for private communication can introduce privacy concerns, as these approaches require all data to be transmitted to a central storage.In this paper, we seek to address the dual concerns of emphasising high resource languages for emoji prediction and 
risking the privacy of people\u2019s data.We introduce a new dataset of 118k tweets (augmented from 25k unique tweets) for emoji prediction in Hindi, and propose a modification to the federated learning algorithm, CausalFedGSD, which aims to strike a balance between model performance and user privacy. We show that our approach obtains comparative scores with more complex centralised models while reducing the amount of data required to optimise the models and minimising risks to user privacy.", + "author": "Deep Gandhi; Jash Mehta; Nirali Parekh; Karan Waghela; Lynette D\u2019Mello; Zeerak Talat", + "authorids": "/d/deep-gandhi/; /j/jash-mehta/; /n/nirali-parekh/; /k/karan-waghela/; /l/lynette-dmello/; /z/zeerak-talat/", + "bibtex": "@inproceedings{gandhi-etal-2022-federated,\n title = \"A Federated Approach to Predicting Emojis in {H}indi Tweets\",\n author = \"Gandhi, Deep and\n Mehta, Jash and\n Parekh, Nirali and\n Waghela, Karan and\n D{'}Mello, Lynette and\n Talat, Zeerak\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.819/\",\n doi = \"10.18653/v1/2022.emnlp-main.819\",\n pages = \"11951--11961\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.819.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.819/", + "pdf_size": 6602294, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17172264767289017669&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of Alberta; Georgia Institute of Technology; Stanford University; Santa Clara University; DJ Sanghvi College of Engineering; Simon Fraser University", + "aff_domain": 
"ualberta.ca;gatech.edu;stanford.edu;scu.edu;djsce.ac.in;sfu.ca", + "email": "ualberta.ca;gatech.edu;stanford.edu;scu.edu;djsce.ac.in;sfu.ca", + "github": "https://github.com/deep1401/fedmojie", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;5", + "aff_unique_norm": "University of Alberta;Georgia Institute of Technology;Stanford University;Santa Clara University;DJ Sanghvi College of Engineering;Simon Fraser University", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.ualberta.ca;https://www.gatech.edu;https://www.stanford.edu;https://www.scu.edu;https://www.djsce.ac.in;https://www.sfu.ca", + "aff_unique_abbr": "UAlberta;Georgia Tech;Stanford;SCU;DJSCE;SFU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;1;1;1;2;0", + "aff_country_unique": "Canada;United States;India" + }, + { + "id": "2022.findings-emnlp.72", + "title": "A Few More Examples May Be Worth Billions of Parameters", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We investigate the dynamics of increasing the number of model parameters versus the number of labeled examples across a wide variety of tasks. Our exploration reveals that while scaling parameters consistently yields performance improvements, the contribution of additional examples highly depends on the task\u2019s format. Specifically, in open question answering tasks, enlarging the training set does not improve performance. In contrast, classification, extractive question answering, and multiple choice tasks benefit so much from additional examples that collecting a few hundred examples is often \u201cworth\u201d billions of parameters. 
We hypothesize that unlike open question answering, which involves recalling specific information, solving strategies for tasks with a more restricted output space transfer across examples, and can therefore be learned with small amounts of labeled data.", + "author": "Yuval Kirstain; Patrick Lewis; Sebastian Riedel; Omer Levy", + "authorids": "/y/yuval-kirstain/; /p/patrick-lewis/; /s/sebastian-riedel/; /o/omer-levy/", + "bibtex": "@inproceedings{kirstain-etal-2022-examples,\n title = \"A Few More Examples May Be Worth Billions of Parameters\",\n author = \"Kirstain, Yuval and\n Lewis, Patrick and\n Riedel, Sebastian and\n Levy, Omer\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.72/\",\n doi = \"10.18653/v1/2022.findings-emnlp.72\",\n pages = \"1017--1029\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.72.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.72/", + "pdf_size": 749418, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16203153135594739942&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.700", + "title": "A Fine-grained Chinese Software Privacy Policy Dataset for Sequence Labeling and Regulation Compliant Identification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Privacy protection raises great attention on both legal levels and user awareness. To protect user privacy, countries enact laws and regulations requiring software privacy policies to regulate their behavior. 
However, privacy policies are written in professional languages with many legal terms and software jargon that prevent users from understanding and even reading them. It is necessary and urgent to use NLP techniques to analyze privacy policies. However, existing datasets ignore law requirements and are limited to English. In this paper, we construct the first Chinese privacy policy dataset, namely CA4P-483, to facilitate the sequence labeling tasks and regulation compliance identification between privacy policies and software. Our dataset includes 483 Chinese Android application privacy policies, over 11K sentences, and 52K fine-grained annotations. We evaluate families of robust and representative baseline models on our dataset. Based on baseline performance, we provide findings and potential research directions on our dataset. Finally, we investigate the potential applications of CA4P-483 combing regulation requirements and program analysis.", + "author": "Kaifa Zhao; Le Yu; Shiyao Zhou; Jing Li; Xiapu Luo; Yat Fei Aemon Chiu; Yutong Liu", + "authorids": "/k/kaifa-zhao/; /l/le-yu/; /s/shiyao-zhou/; /j/jing-li/; /x/xiapu-luo/; /y/yat-fei-aemon-chiu/; /y/yutong-liu/", + "bibtex": "@inproceedings{zhao-etal-2022-fine-grained,\n title = \"A Fine-grained {C}hinese Software Privacy Policy Dataset for Sequence Labeling and Regulation Compliant Identification\",\n author = \"Zhao, Kaifa and\n Yu, Le and\n Zhou, Shiyao and\n Li, Jing and\n Luo, Xiapu and\n Chiu, Yat Fei Aemon and\n Liu, Yutong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.700/\",\n doi = \"10.18653/v1/2022.emnlp-main.700\",\n pages = \"10266--10277\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.700.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.700/", + "pdf_size": 448207, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16892093179884618261&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computing, The Hong Kong Polytechnic University, HKSAR, China; Department of Computing, The Hong Kong Polytechnic University, HKSAR, China; Department of Computing, The Hong Kong Polytechnic University, HKSAR, China; Department of Computing, The Hong Kong Polytechnic University, HKSAR, China; Department of Computing, The Hong Kong Polytechnic University, HKSAR, China; Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, HKSAR, China; Department of Electronic and Information Engineering, The Hong Kong Polytechnic University, HKSAR, China", + "aff_domain": "connect.polyu.hk;polyu.edu.hk;connect.polyu.hk;polyu.edu.hk;polyu.edu.hk;connect.polyu.hk;connect.polyu.hk", + "email": "connect.polyu.hk;polyu.edu.hk;connect.polyu.hk;polyu.edu.hk;polyu.edu.hk;connect.polyu.hk;connect.polyu.hk", + "github": "https://github.com/zacharykzhao/CA4P-483", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "The Hong Kong Polytechnic University", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.polyu.edu.hk", + "aff_unique_abbr": "PolyU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.398", + "title": "A Framework for Adapting Pre-Trained Language Models to Knowledge Graph Completion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has demonstrated that entity representations can be extracted from pre-trained language models to develop knowledge graph completion models that are more robust to the 
naturally occurring sparsity found in knowledge graphs. In this work, we conduct a comprehensive exploration of how to best extract and incorporate those embeddings into knowledge graph completion models. We explore the suitability of the extracted embeddings for direct use in entity ranking and introduce both unsupervised and supervised processing methods that can lead to improved downstream performance. We then introduce supervised embedding extraction methods that can extract more informative representations. We then synthesize our findings and develop a knowledge graph completion model that significantly outperforms recent neural models.", + "author": "Justin Lovelace; Carolyn Ros\u00e9", + "authorids": "/j/justin-lovelace/; /c/carolyn-rose/", + "bibtex": "@inproceedings{lovelace-rose-2022-framework,\n title = \"A Framework for Adapting Pre-Trained Language Models to Knowledge Graph Completion\",\n author = \"Lovelace, Justin and\n Ros{\\'e}, Carolyn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.398/\",\n doi = \"10.18653/v1/2022.emnlp-main.398\",\n pages = \"5937--5955\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.398.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.398/", + "pdf_size": 1582510, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7128901073398466469&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Computer Science Department, Cornell University + Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cornell.edu;andrew.cmu.edu", + "email": "cornell.edu;andrew.cmu.edu", + "github": 
"https://github.com/justinlovelace/LM-KG-Completion", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Cornell University;Carnegie Mellon University", + "aff_unique_dep": "Computer Science Department;", + "aff_unique_url": "https://www.cornell.edu;https://www.cmu.edu", + "aff_unique_abbr": "Cornell;CMU", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Ithaca;;Pittsburgh", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.342", + "title": "A Framework for Automatic Generation of Spoken Question-Answering Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper describes a framework to automatically generate a spoken question answering (QA) dataset. The framework consists of a question generation (QG) module to generate questions automatically from given text documents, a text-to-speech (TTS) module to convert the text documents into spoken form and an automatic speech recognition (ASR) module to transcribe the spoken content. The final dataset contains question-answer pairs for both the reference text and ASR transcriptions as well as the audio files corresponding to each reference text. For QG and ASR systems we used pre-trained multilingual encoder-decoder transformer models and fine-tuned these models using a limited amount of manually generated QA data and TTS-based speech data, respectively. As a proof of concept, we investigated the proposed framework for Turkish and generated the Turkish Question Answering (TurQuAse) dataset using Wikipedia articles. Manual evaluation of the automatically generated question- answer pairs and QA performance evaluation with state of-the-art models on TurQuAse show that the proposed framework is efficient for automatically generating spoken QA datasets. To the best of our knowledge, TurQuAse is the first publicly available spoken question answering dataset for Turkish. 
The proposed framework can be easily extended to other languages where a limited amount of QA data is available.", + "author": "Merve \u00dcnl\u00fc Menev\u015fe; Yusufcan Manav; Ebru Arisoy; Arzucan \u00d6zg\u00fcr", + "authorids": "/m/merve-unlu-menevse/; /y/yusufcan-manav/; /e/ebru-arisoy/; /a/arzucan-ozgur/", + "bibtex": "@inproceedings{unlu-menevse-etal-2022-framework,\n title = \"A Framework for Automatic Generation of Spoken Question-Answering Data\",\n author = {{\\\"U}nl{\\\"u} Menev{\\c{s}}e, Merve and\n Manav, Yusufcan and\n Arisoy, Ebru and\n {\\\"O}zg{\\\"u}r, Arzucan},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.342/\",\n doi = \"10.18653/v1/2022.findings-emnlp.342\",\n pages = \"4659--4666\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.342.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.342/", + "pdf_size": 175880, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3508046061069723259&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Bo\u011fazi\u00e7i University; Bo\u011fazi\u00e7i University; MEF University; Bo\u011fazi\u00e7i University", + "aff_domain": "boun.edu.tr;boun.edu.tr;mef.edu.tr;boun.edu.tr", + "email": "boun.edu.tr;boun.edu.tr;mef.edu.tr;boun.edu.tr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Bo\u011fazi\u00e7i University;MEF University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.boun.edu.tr;https://www.mef.edu.tr", + "aff_unique_abbr": "BU;MEF", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": 
"Turkey" + }, + { + "id": "2022.emnlp-main.713", + "title": "A Generative Model for End-to-End Argument Mining with Reconstructed Positional Encoding and Constrained Pointer Mechanism", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Argument mining (AM) is a challenging task as it requires recognizing the complex argumentation structures involving multiple subtasks.To handle all subtasks of AM in an end-to-end fashion, previous works generally transform AM into a dependency parsing task.However, such methods largely require complex pre- and post-processing to realize the task transformation.In this paper, we investigate the end-to-end AM task from a novel perspective by proposing a generative framework, in which the expected outputs of AM are framed as a simple target sequence. Then, we employ a pre-trained sequence-to-sequence language model with a constrained pointer mechanism (CPM) to model the clues for all the subtasks of AM in the light of the target sequence. 
Furthermore, we devise a reconstructed positional encoding (RPE) to alleviate the order biases induced by the autoregressive generation paradigm.Experimental results show that our proposed framework achieves new state-of-the-art performance on two AM benchmarks.", + "author": "Jianzhu Bao; Yuhang He; Yang Sun; Bin Liang; Jiachen Du; Bing Qin; Min Yang; Ruifeng Xu", + "authorids": "/j/jianzhu-bao/; /y/yuhang-he/; /y/yang-sun/; /b/bin-liang/; /j/jiachen-du/; /b/bing-qin/; /m/min-yang/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{bao-etal-2022-generative,\n title = \"A Generative Model for End-to-End Argument Mining with Reconstructed Positional Encoding and Constrained Pointer Mechanism\",\n author = \"Bao, Jianzhu and\n He, Yuhang and\n Sun, Yang and\n Liang, Bin and\n Du, Jiachen and\n Qin, Bing and\n Yang, Min and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.713/\",\n doi = \"10.18653/v1/2022.emnlp-main.713\",\n pages = \"10437--10449\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.713.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.713/", + "pdf_size": 389255, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13719410378782216132&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence 
Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "gmail.com;outlook.com;mail.ustc.edu.cn;stu.hit.edu.cn;gmail.com;ir.hit.edu.cn;siat.ac.cn;hit.edu.cn", + "email": "gmail.com;outlook.com;mail.ustc.edu.cn;stu.hit.edu.cn;gmail.com;ir.hit.edu.cn;siat.ac.cn;hit.edu.cn", + "github": "https://github.com/HITSZ-HLT/GMAM", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0;2;0+1+3", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Shenzhen Institute of Advanced Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;", + "aff_unique_url": "http://en.hhit.edu.cn/;;http://www.siat.cas.cn;", + "aff_unique_abbr": "HIT;;SIAT;", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.31", + "title": "A Good Neighbor, A Found Treasure: Mining Treasured Neighbors for Knowledge Graph Entity Typing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of knowledge graph entity typing (KGET) aims to infer the missing types for entities in knowledge graphs. Some pioneering work has proved that neighbor information is very important for the task. 
However, existing methods only leverage the one-hop neighbor information of the central entity, ignoring the multi-hop neighbor information that can provide valuable clues for inference. Besides, we also observe that there are co-occurrence relations between types, which is very helpful to alleviate false-negative problem. In this paper, we propose a novel method called Mining Treasured Neighbors (MiNer) to make use of these two characteristics. Firstly, we devise a Neighbor Information Aggregation module to aggregate the neighbor information. Then, we propose an Entity Type Inference module to mitigate the adverse impact of the irrelevant neighbor information. Finally, a Type Co-occurrence Regularization module is designed to prevent the model from overfitting the false negative examples caused by missing types. Experimental results on two widely used datasets indicate that our approach significantly outperforms previous state-of-the-art methods.", + "author": "Zhuoran Jin; Pengfei Cao; Yubo Chen; Kang Liu; Jun Zhao", + "authorids": "/z/zhuoran-jin/; /p/pengfei-cao/; /y/yubo-chen/; /k/kang-liu/; /j/jun-zhao/", + "bibtex": "@inproceedings{jin-etal-2022-good-neighbor,\n title = \"A Good Neighbor, A Found Treasure: Mining Treasured Neighbors for Knowledge Graph Entity Typing\",\n author = \"Jin, Zhuoran and\n Cao, Pengfei and\n Chen, Yubo and\n Liu, Kang and\n Zhao, Jun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.31/\",\n doi = \"10.18653/v1/2022.emnlp-main.31\",\n pages = \"480--490\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.31.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.31/", + "pdf_size": 423958, + 
"gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1129476957667406906&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China + National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China; School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China + National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China; School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China + National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China; School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China + National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China + Beijing Academy of Artificial Intelligence, Beijing, China; School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China + National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China", + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "https://github.com/jinzhuoran/MiNer/", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+1+2;0+1", + "aff_unique_norm": "University of Chinese Academy of Sciences;National Laboratory of Pattern Recognition;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "School of Artificial Intelligence;Institute of Automation;", + "aff_unique_url": "http://www.ucas.ac.cn;;https://www.baaic.cn", + "aff_unique_abbr": "UCAS;;BAAI", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": 
"2022.findings-emnlp.184", + "title": "A Hierarchical N-Gram Framework for Zero-Shot Link Prediction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge graphs typically contain a large number of entities but often cover only a fraction of all relations between them (i.e., incompleteness). Zero-shot link prediction (ZSLP) is a popular way to tackle the problem by automatically identifying unobserved relations between entities. Most recent approaches use textual features of relations (e.g., surface name or textual descriptions) as auxiliary information to improve the encoded representation. These methods lack robustness as they are bound to support only tokens from a fixed vocabulary and unable to model out-of-vocabulary (OOV) words. Subword units such as character n-grams have the capability of generating more expressive representations for OOV words. Hence, in this paper, we propose a Hierarchical N-gram framework for Zero-Shot Link Prediction (HNZSLP) that leverages character n-gram information for ZSLP. Our approach works by first constructing a hierarchical n-gram graph from the surface name of relations. Subsequently, a new Transformer-based network models the hierarchical n-gram graph to learn a relation embedding for ZSLP. 
Experimental results show that our proposed HNZSLP method achieves state-of-the-art performance on two standard ZSLP datasets.", + "author": "Mingchen Li; Junfan Chen; Samuel Mensah; Nikolaos Aletras; Xiulong Yang; Yang Ye", + "authorids": "/m/mingchen-li/; /j/junfan-chen/; /s/samuel-mensah/; /n/nikolaos-aletras/; /x/xiulong-yang/; /y/yang-ye/", + "bibtex": "@inproceedings{li-etal-2022-hierarchical-n,\n title = \"A Hierarchical N-Gram Framework for Zero-Shot Link Prediction\",\n author = \"Li, Mingchen and\n Chen, Junfan and\n Mensah, Samuel and\n Aletras, Nikolaos and\n Yang, Xiulong and\n Ye, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.184/\",\n doi = \"10.18653/v1/2022.findings-emnlp.184\",\n pages = \"2498--2509\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.184.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.184/", + "pdf_size": 701152, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5589065105414808942&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Georgia State University; Beihang University; University of Sheffield; University of Sheffield; Georgia State University; Georgia State University", + "aff_domain": "student.gsu.edu;act.buaa.edu.cn;sheffield.ac.uk;sheffield.ac.uk;student.gsu.edu;student.gsu.edu", + "email": "student.gsu.edu;act.buaa.edu.cn;sheffield.ac.uk;sheffield.ac.uk;student.gsu.edu;student.gsu.edu", + "github": "https://github.com/ToneLi/HNZSLP", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;2;0;0", + "aff_unique_norm": "Georgia State University;Beihang University;University of Sheffield", + "aff_unique_dep": ";;", + 
"aff_unique_url": "https://www.gsu.edu;http://www.buaa.edu.cn/;https://www.sheffield.ac.uk", + "aff_unique_abbr": "GSU;BUAA;Sheffield", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;2;0;0", + "aff_country_unique": "United States;China;United Kingdom" + }, + { + "id": "2022.emnlp-industry.3", + "title": "A Hybrid Approach to Cross-lingual Product Review Summarization", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "We present a hybrid approach for product review summarization which consists of: (i) an unsupervised extractive step to extract the most important sentences out of all the reviews, and (ii) a supervised abstractive step to summarize the extracted sentences into a coherent short summary. This approach allows us to develop an efficient cross-lingual abstractive summarizer that can generate summaries in any language, given the extracted sentences out of thousands of reviews in a source language. In order to train and test the abstractive model, we create the Cross-lingual Amazon Reviews Summarization (CARS) dataset which provides English summaries for training, and English, French, Italian, Arabic, and Hindi summaries for testing based on selected English reviews. 
We show that the summaries generated by our model are as good as human written summaries in coherence, informativeness, non-redundancy, and fluency.", + "author": "Saleh Soltan; Victor Soto; Ke Tran; Wael Hamza", + "authorids": "/s/saleh-soltan/; /v/victor-soto/; /k/ke-m-tran/; /w/wael-hamza/", + "bibtex": "@inproceedings{soltan-etal-2022-hybrid,\n title = \"A Hybrid Approach to Cross-lingual Product Review Summarization\",\n author = \"Soltan, Saleh and\n Soto, Victor and\n Tran, Ke and\n Hamza, Wael\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.3/\",\n doi = \"10.18653/v1/2022.emnlp-industry.3\",\n pages = \"18--28\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.3.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.3/", + "pdf_size": 431556, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:0tYOWSDG1pMJ:scholar.google.com/&scioq=A+Hybrid+Approach+to+Cross-lingual+Product+Review+Summarization&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff": "Alexa AI, New York, USA; Alexa AI, New York, USA; Amazon AI Translate, Berlin, Germany; Alexa AI, Dallas, USA", + "aff_domain": "amazon.com;amazon.com;amazon.de;amazon.com", + "email": "amazon.com;amazon.com;amazon.de;amazon.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Alexa AI;Amazon", + "aff_unique_dep": "AI Research;Amazon AI Translate", + "aff_unique_url": "https://www.alexa.com;https://www.amazon.de", + "aff_unique_abbr": "Alexa AI;Amazon", + "aff_campus_unique_index": "0;0;1;2", + "aff_campus_unique": "New York;Berlin;Dallas", + "aff_country_unique_index": "0;0;1;0", + 
"aff_country_unique": "United States;Germany" + }, + { + "id": "2022.emnlp-main.216", + "title": "A Joint Learning Framework for Restaurant Survival Prediction and Explanation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The bloom of the Internet and the recent breakthroughs in deep learning techniques open a new door to AI for E-commence, with a trend of evolving from using a few financial factors such as liquidity and profitability to using more advanced AI techniques to process complex and multi-modal data. In this paper, we tackle the practical problem of restaurant survival prediction. We argue that traditional methods ignore two essential respects, which are very helpful for the task: 1) modeling customer reviews and 2) jointly considering status prediction and result explanation. Thus, we propose a novel joint learning framework for explainable restaurant survival prediction based on the multi-modal data of user-restaurant interactions and users\u2019 textual reviews. Moreover, we design a graph neural network to capture the high-order interactions and design a co-attention mechanism to capture the most informative and meaningful signal from noisy textual reviews. 
Our results on two datasets show a significant and consistent improvement over the SOTA techniques (average 6.8% improvement in prediction and 45.3% improvement in explanation).", + "author": "Xin Li; Xiaojie Zhang; Peng JiaHao; Rui Mao; Mingyang Zhou; Xing Xie; Hao Liao", + "authorids": "/x/xin-li/; /x/xiaojie-zhang/; /p/peng-jiahao/; /r/rui-mao/; /m/mingyang-zhou/; /x/xing-xie/; /h/hao-liao/", + "bibtex": "@inproceedings{li-etal-2022-joint,\n title = \"A Joint Learning Framework for Restaurant Survival Prediction and Explanation\",\n author = \"Li, Xin and\n Zhang, Xiaojie and\n JiaHao, Peng and\n Mao, Rui and\n Zhou, Mingyang and\n Xie, Xing and\n Liao, Hao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.216/\",\n doi = \"10.18653/v1/2022.emnlp-main.216\",\n pages = \"3285--3297\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.216.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.216/", + "pdf_size": 1570240, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11904980118040512654&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Shenzhen University, China; Shenzhen University, China; Shenzhen University, China; Shenzhen University, China; Shenzhen University, China; Microsoft Research Asia; Shenzhen University, China", + "aff_domain": "email.szu.edu.cn;email.szu.edu.cn;email.szu.edu.cn;szu.edu.cn;szu.edu.cn;microsoft.com;szu.edu.cn", + "email": "email.szu.edu.cn;email.szu.edu.cn;email.szu.edu.cn;szu.edu.cn;szu.edu.cn;microsoft.com;szu.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;0", + "aff_unique_norm": "Shenzhen 
University;Microsoft Research", + "aff_unique_dep": ";Research", + "aff_unique_url": "https://www.szu.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "SZU;MSR Asia", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.182", + "title": "A Localized Geometric Method to Match Knowledge in Low-dimensional Hyperbolic Space", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Matching equivalent entities across Knowledge graphs is a pivotal step for knowledge fusion. Previous approaches usually study the problem in Euclidean space. However, recent works have shown that hyperbolic space has a higher capacity than Euclidean space and hyperbolic embedding can represent the hierarchical structure in a knowledge graph. In this paper, we propose a localized geometric method to find equivalent entities in hyperbolic space. Specifically, we use a hyperbolic neural network to encode the lingual information of entities and the structure of both knowledge graphs into a low-dimensional hyperbolic space. To address the asymmetry of structure on different KGs and the localized nature of relations, we learn an instance-specific geometric mapping function based on rotation to match entity pairs. A contrastive loss function is used to train the model. 
The experiment verifies the power of low-dimensional hyperbolic space for entity matching and shows that our method outperforms the state of the art by a large margin.", + "author": "Bo Hui; Tian Xia; Wei-Shinn Ku", + "authorids": "/b/bo-hui/; /t/tian-xia/; /w/wei-shinn-ku/", + "bibtex": "@inproceedings{hui-etal-2022-localized,\n title = \"A Localized Geometric Method to Match Knowledge in Low-dimensional Hyperbolic Space\",\n author = \"Hui, Bo and\n Xia, Tian and\n Ku, Wei-Shinn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.182/\",\n doi = \"10.18653/v1/2022.emnlp-main.182\",\n pages = \"2822--2832\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.182.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.182/", + "pdf_size": 563841, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8718701141648112439&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Auburn University; Auburn University; Auburn University", + "aff_domain": "auburn.edu;auburn.edu;auburn.edu", + "email": "auburn.edu;auburn.edu;auburn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Auburn University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.auburn.edu", + "aff_unique_abbr": "Auburn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.612", + "title": "A Major Obstacle for NLP Research: Let\u2019s Talk about Time Allocation!", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The field of 
natural language processing (NLP) has grown over the last few years: conferences have become larger, we have published an incredible amount of papers, and state-of-the-art research has been implemented in a large variety of customer-facing products. However, this paper argues that we have been less successful than we *should* have been and reflects on where and how the field fails to tap its full potential. Specifically, we demonstrate that, in recent years, **subpar time allocation has been a major obstacle for NLP research**. We outline multiple concrete problems together with their negative consequences and, importantly, suggest remedies to improve the status quo. We hope that this paper will be a starting point for discussions around which common practices are \u2013 or are *not* \u2013 beneficial for NLP research.", + "author": "Katharina Kann; Shiran Dudy; Arya D. McCarthy", + "authorids": "/k/katharina-von-der-wense/; /s/shiran-dudy/; /a/arya-d-mccarthy/", + "bibtex": "@inproceedings{kann-etal-2022-major,\n title = \"A Major Obstacle for {NLP} Research: Let`s Talk about Time Allocation!\",\n author = \"Kann, Katharina and\n Dudy, Shiran and\n McCarthy, Arya D.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.612/\",\n doi = \"10.18653/v1/2022.emnlp-main.612\",\n pages = \"8959--8969\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.612.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.612/", + "pdf_size": 184626, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18209927587062290303&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 3, + "aff": "University of Colorado Boulder; 
University of Colorado Boulder; Johns Hopkins University", + "aff_domain": "colorado.edu;colorado.edu;jhu.edu", + "email": "colorado.edu;colorado.edu;jhu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of Colorado;Johns Hopkins University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.colorado.edu;https://www.jhu.edu", + "aff_unique_abbr": "CU Boulder;JHU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Boulder;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.171", + "title": "A Multi-Modal Knowledge Graph for Classical Chinese Poetry", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Classical Chinese poetry has a long history and is a precious cultural heritage of humankind. Displaying the classical Chinese poetry in a visual way, helps to cross cultural barriers in different countries, making it enjoyable for all the people. In this paper, we construct a multi-modal knowledge graph for classical Chinese poetry (PKG), in which the visual information of words in the poetry are incorporated. Then a multi-modal pre-training language model, PKG-Bert, is proposed to obtain the poetry representation with visual information, which bridges the semantic gap between different modalities. PKG-Bert achieves the state-of-the-art performance on the poetry-image retrieval task, showing the effectiveness of incorporating the multi-modal knowledge. 
The large-scale multi-modal knowledge graph of classical Chinese poetry will be released to promote the researches in classical Chinese culture area.", + "author": "Yuqing Li; Yuxin Zhang; Bin Wu; Ji-Rong Wen; Ruihua Song; Ting Bai", + "authorids": "/y/yuqing-li/; /y/yuxin-zhang/; /b/bin-wu/; /j/ji-rong-wen/; /r/ruihua-song/; /t/ting-bai/", + "bibtex": "@inproceedings{li-etal-2022-multi-modal,\n title = \"A Multi-Modal Knowledge Graph for Classical {C}hinese Poetry\",\n author = \"Li, Yuqing and\n Zhang, Yuxin and\n Wu, Bin and\n Wen, Ji-Rong and\n Song, Ruihua and\n Bai, Ting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.171/\",\n doi = \"10.18653/v1/2022.findings-emnlp.171\",\n pages = \"2318--2326\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.171.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.171/", + "pdf_size": 2277979, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13240709464825254830&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/liyuqing1/PKG-Bert", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.153", + "title": "A Multifaceted Framework to Evaluate Evasion, Content Preservation, and Misattribution in Authorship Obfuscation Techniques", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Authorship obfuscation techniques have commonly been evaluated based on their ability to hide the author\u2019s identity (evasion) while preserving the content of the original text. 
However, to avoid overstating the systems\u2019 effectiveness, evasion detection must be evaluated using competitive identification techniques in settings that mimic real-life scenarios, and the outcomes of the content-preservation evaluation have to be interpretable by potential users of these obfuscation tools. Motivated by recent work on cross-topic authorship identification and content preservation in summarization, we re-evaluate different authorship obfuscation techniques on detection evasion and content preservation. Furthermore, we propose a new information-theoretic measure to characterize the misattribution harm that can be caused by detection evasion. Our results reveal key weaknesses in state-of-the-art obfuscation techniques and a surprisingly competitive effectiveness from a back-translation baseline in all evaluation aspects.", + "author": "Malik Altakrori; Thomas Scialom; Benjamin C. M. Fung; Jackie Chi Kit Cheung", + "authorids": "/m/malik-altakrori/; /t/thomas-scialom/; /b/benjamin-c-m-fung/; /j/jackie-chi-kit-cheung/", + "bibtex": "@inproceedings{altakrori-etal-2022-multifaceted,\n title = \"A Multifaceted Framework to Evaluate Evasion, Content Preservation, and Misattribution in Authorship Obfuscation Techniques\",\n author = \"Altakrori, Malik and\n Scialom, Thomas and\n Fung, Benjamin C. M. 
and\n Cheung, Jackie Chi Kit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.153/\",\n doi = \"10.18653/v1/2022.emnlp-main.153\",\n pages = \"2391--2406\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.153.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.153/", + "pdf_size": 283815, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12969802747107695812&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science, McGill University / Mila, Montreal, Canada; Meta AI, Paris, France; School of Information Studies, McGill University / Mila, Montreal, Canada; School of Computer Science, McGill University / Mila, Montreal, Canada", + "aff_domain": "mail.mcgill.ca;meta.com;mcgill.ca;cs.mcgill.ca", + "email": "mail.mcgill.ca;meta.com;mcgill.ca;cs.mcgill.ca", + "github": "https://malikaltakrori.github.io/papers/EMNLP2022/", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "McGill University;Meta AI", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.mcgill.ca;https://meta.ai", + "aff_unique_abbr": "McGill;Meta AI", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Montreal;Paris", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Canada;France" + }, + { + "id": "2022.emnlp-main.101", + "title": "A Multilingual Perspective Towards the Evaluation of Attribution Methods in Natural Language Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most evaluations of attribution methods focus on the English language. 
In this work, we present a multilingual approach for evaluating attribution methods for the Natural Language Inference (NLI) task in terms of faithfulness and plausibility.First, we introduce a novel cross-lingual strategy to measure faithfulness based on word alignments, which eliminates the drawbacks of erasure-based evaluations.We then perform a comprehensive evaluation of attribution methods, considering different output mechanisms and aggregation methods.Finally, we augment the XNLI dataset with highlight-based explanations, providing a multilingual NLI dataset with highlights, to support future exNLP studies. Our results show that attribution methods performing best for plausibility and faithfulness are different.", + "author": "Kerem Zaman; Yonatan Belinkov", + "authorids": "/k/kerem-zaman/; /y/yonatan-belinkov/", + "bibtex": "@inproceedings{zaman-belinkov-2022-multilingual,\n title = \"A Multilingual Perspective Towards the Evaluation of Attribution Methods in Natural Language Inference\",\n author = \"Zaman, Kerem and\n Belinkov, Yonatan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.101/\",\n doi = \"10.18653/v1/2022.emnlp-main.101\",\n pages = \"1556--1576\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.101.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.101/", + "pdf_size": 6488747, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3273731152228454320&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "UNC Chapel Hill + Bo\u011fazi\u00e7i University; Technion \u2013 Israel Institute of Technology", + "aff_domain": "cs.unc.edu;technion.ac.il", + "email": 
"cs.unc.edu;technion.ac.il", + "github": "", + "project": "https://www.keremzaman.com/explaiNLI", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "University of North Carolina at Chapel Hill;Bo\u011fazi\u00e7i University;Technion \u2013 Israel Institute of Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.unc.edu;https://www.boun.edu.tr;https://www.technion.ac.il/en/", + "aff_unique_abbr": "UNC;BU;Technion", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Chapel Hill;", + "aff_country_unique_index": "0+1;2", + "aff_country_unique": "United States;Turkey;Israel" + }, + { + "id": "2022.findings-emnlp.158", + "title": "A Neural-Symbolic Approach to Natural Language Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Deep neural networks, empowered by pre-trained language models, have achieved remarkable results in natural language understanding (NLU) tasks. However, their performances can drastically deteriorate when logical reasoning is needed. This is because NLU in principle depends on not only analogical reasoning, which deep neural networks are good at, but also logical reasoning. According to the dual-process theory, analogical reasoning and logical reasoning are respectively carried out by System 1 and System 2 in the human brain. Inspired by the theory, we present a novel framework for NLU called Neural-Symbolic Processor (NSP), which performs analogical reasoning based on neural processing and logical reasoning based on both neural and symbolic processing. As a case study, we conduct experiments on two NLU tasks, question answering (QA) and natural language inference (NLI), when numerical reasoning (a type of logical reasoning) is necessary. 
The experimental results show that our method significantly outperforms state-of-the-art methods in both tasks.", + "author": "Zhixuan Liu; Zihao Wang; Yuan Lin; Hang Li", + "authorids": "/z/zhixuan-liu/; /z/zihao-wang/; /y/yuan-lin/; /h/hang-li/", + "bibtex": "@inproceedings{liu-etal-2022-neural,\n title = \"A Neural-Symbolic Approach to Natural Language Understanding\",\n author = \"Liu, Zhixuan and\n Wang, Zihao and\n Lin, Yuan and\n Li, Hang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.158/\",\n doi = \"10.18653/v1/2022.findings-emnlp.158\",\n pages = \"2159--2172\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.158.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.158/", + "pdf_size": 276224, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9582418068522007404&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Shanghai Jiaotong University; CSE, HKUST; ByteDance AI Lab+Shanghai Jiaotong University; ByteDance AI Lab", + "aff_domain": "sjtu.edu.cn;cse.ust.hk;bytedance.com;bytedance.com", + "email": "sjtu.edu.cn;cse.ust.hk;bytedance.com;bytedance.com", + "github": "https://github.com/chadlzx/NSP_QA ;https://github.com/zihao-wang/Number-NLI", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2+0;2", + "aff_unique_norm": "Shanghai Jiaotong University;Hong Kong University of Science and Technology;ByteDance", + "aff_unique_dep": ";Department of Computer Science and Engineering;AI Lab", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.ust.hk;https://www.bytedance.com", + "aff_unique_abbr": "SJTU;HKUST;ByteDance", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.504", + "title": "A POMDP Dialogue Policy with 3-way Grounding and Adaptive Sensing for Learning through Communication", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Agents to assist with rescue, surgery, and similar activities could collaborate better with humans if they could learn new strategic behaviors through communication. We introduce a novel POMDP dialogue policy for learning from people. The policy has 3-way grounding of language in the shared physical context, the dialogue context, and persistent knowledge. It can learn distinct but related games, and can continue learning across dialogues for complex games. A novel sensing component supports adaptation to information-sharing differences across people. The single policy performs better than oracle policies customized to specific games and information behavior.", + "author": "Maryam Zare; Alan Wagner; Rebecca Passonneau", + "authorids": "/m/maryam-zare/; /a/alan-wagner/; /r/rebecca-j-passonneau/", + "bibtex": "@inproceedings{zare-etal-2022-pomdp,\n title = \"A {POMDP} Dialogue Policy with 3-way Grounding and Adaptive {S}ensing for Learning through Communication\",\n author = \"Zare, Maryam and\n Wagner, Alan and\n Passonneau, Rebecca\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.504/\",\n doi = \"10.18653/v1/2022.findings-emnlp.504\",\n pages = \"6767--6780\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.504.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.504/", + "pdf_size": 3231164, + "gs_citation": 
1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2176234365669657030&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Pennsylvania State University, University Park; Pennsylvania State University, University Park; Pennsylvania State University, University Park", + "aff_domain": "psu.edu;psu.edu;psu.edu", + "email": "psu.edu;psu.edu;psu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Pennsylvania State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.psu.edu", + "aff_unique_abbr": "PSU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "University Park", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.292", + "title": "A Second Wave of UD Hebrew Treebanking and Cross-Domain Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Foundational Hebrew NLP tasks such as segmentation, tagging and parsing, have relied to date on various versions of the Hebrew Treebank (HTB, Sima\u2019an et al. 2001). However, the data in HTB, a single-source newswire corpus, is now over 30 years old, and does not cover many aspects of contemporary Hebrew on the web. This paper presents a new, freely available UD treebank of Hebrew stratified from a range of topics selected from Hebrew Wikipedia. In addition to introducing the corpus and evaluating the quality of its annotations, we deploy automatic validation tools based on grew (Guillaume, 2021), and conduct the first cross domain parsing experiments in Hebrew. We obtain new state-of-the-art (SOTA) results on UD NLP tasks, using a combination of the latest language modelling and some incremental improvements to existing transformer based approaches. 
We also release a new version of the UD HTB matching annotation scheme updates from our new corpus.", + "author": "Amir Zeldes; Nick Howell; Noam Ordan; Yifat Ben Moshe", + "authorids": "/a/amir-zeldes/; /n/nick-howell/; /n/noam-ordan/; /y/yifat-ben-moshe/", + "bibtex": "@inproceedings{zeldes-etal-2022-second,\n title = \"A Second Wave of {UD} {H}ebrew Treebanking and Cross-Domain Parsing\",\n author = \"Zeldes, Amir and\n Howell, Nick and\n Ordan, Noam and\n Ben Moshe, Yifat\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.292/\",\n doi = \"10.18653/v1/2022.emnlp-main.292\",\n pages = \"4331--4344\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.292.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.292/", + "pdf_size": 324492, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13619329319892775889&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Georgetown University; IAHLT; IAHLT; IAHLT", + "aff_domain": "georgetown.edu;gmail.com;gmail.com;gmail.com", + "email": "georgetown.edu;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Georgetown University;Institute of Automation, Chinese Academy of Sciences", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.georgetown.edu;http://www.ia.ac.cn", + "aff_unique_abbr": "GU;IAHLT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.578", + "title": "A Sequential Flow Control Framework for Multi-hop Knowledge Base 
Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "One of the key challenges of knowledge base question answering (KBQA) is the multi-hop reasoning. Since in different hops, one attends to different parts of question, it is important to dynamically represent the question semantics for each hop. Existing methods, however, (i) infer the dynamic question representation only through coarse-grained attention mechanisms, which may bring information loss, (ii) and have not effectively modeled the sequential logic, which is crucial for the multi-hop reasoning process in KBQA.To address these issues, we propose a sequential reasoning self-attention mechanism to capture the crucial reasoning information of each single hop in a more fine-grained way. Based on Gated Recurrent Unit (GRU) which is good at modeling sequential process, we propose a simple but effective GRU-inspired Flow Control (GFC) framework to model sequential logic in the whole multi-hop process.Extensive experiments on three popular benchmark datasets have demonstrated the superior effectiveness of our model. In particular, GFC achieves new state-of-the-art Hits@1 of 76.8% on WebQSP and is also effective when KB is incomplete. 
Our code and data are available at https://github.com/Xie-Minghui/GFC.", + "author": "Minghui Xie; Chuzhan Hao; Peng Zhang", + "authorids": "/m/minghui-xie/; /c/chuzhan-hao/; /p/peng-zhang/", + "bibtex": "@inproceedings{xie-etal-2022-sequential,\n title = \"A Sequential Flow Control Framework for Multi-hop Knowledge Base Question Answering\",\n author = \"Xie, Minghui and\n Hao, Chuzhan and\n Zhang, Peng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.578/\",\n doi = \"10.18653/v1/2022.emnlp-main.578\",\n pages = \"8450--8460\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.578.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.578/", + "pdf_size": 1569757, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12023129608476936798&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University", + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "github": "https://github.com/Xie-Minghui/GFC", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "College of Intelligence and Computing", + "aff_unique_url": "http://www.tju.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.681", + "title": "A Simple Contrastive Learning Framework for Interactive Argument 
Pair Identification via Argument-Context Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Interactive argument pair identification is an emerging research task for argument mining, aiming to identify whether two arguments are interactively related. It is pointed out that the context of the argument is essential to improve identification performance. However, current context-based methods achieve limited improvements since the entire context typically contains much irrelevant information. In this paper, we propose a simple contrastive learning framework to solve this problem by extracting valuable information from the context. This framework can construct hard argument-context samples and obtain a robust and uniform representation by introducing contrastive learning. We also propose an argument-context extraction module to enhance information extraction by discarding irrelevant blocks. The experimental results show that our method achieves the state-of-the-art performance on the benchmark dataset. 
Further analysis demonstrates the effectiveness of our proposed modules and visually displays more compact semantic representations.", + "author": "Lida Shi; Fausto Giunchiglia; Rui Song; Daqian Shi; Tongtong Liu; Xiaolei Diao; Hao Xu", + "authorids": "/l/lida-shi/; /f/fausto-giunchiglia/; /r/rui-song/; /d/daqian-shi/; /t/tongtong-liu/; /x/xiaolei-diao/; /h/hao-xu/", + "bibtex": "@inproceedings{shi-etal-2022-simple,\n title = \"A Simple Contrastive Learning Framework for Interactive Argument Pair Identification via Argument-Context Extraction\",\n author = \"Shi, Lida and\n Giunchiglia, Fausto and\n Song, Rui and\n Shi, Daqian and\n Liu, Tongtong and\n Diao, Xiaolei and\n Xu, Hao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.681/\",\n doi = \"10.18653/v1/2022.emnlp-main.681\",\n pages = \"10027--10039\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.681.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.681/", + "pdf_size": 1005849, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5477157248318799996&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/shilida/CL_Interactive_Argument_Pair_Identification", + "project": "", + "author_num": 7 + }, + { + "id": "2022.findings-emnlp.501", + "title": "A Simple and Strong Baseline for End-to-End Neural RST-style Discourse Parsing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To promote and further develop RST-style discourse parsing models, we need a strong baseline that can be regarded as a reference for reporting 
reliable experimental results. This paper explores a strong baseline by integrating existing simple parsing strategies, top-down and bottom-up, with various transformer-based pre-trained language models.The experimental results obtained from two benchmark datasets demonstrate that the parsing performance strongly relies on the pre-trained language models rather than the parsing strategies.In particular, the bottom-up parser achieves large performance gains compared to the current best parser when employing DeBERTa.We further reveal that language models with a span-masking scheme especially boost the parsing performance through our analysis within intra- and multi-sentential parsing, and nuclearity prediction.", + "author": "Naoki Kobayashi; Tsutomu Hirao; Hidetaka Kamigaito; Manabu Okumura; Masaaki Nagata", + "authorids": "/n/naoki-kobayashi/; /t/tsutomu-hirao/; /h/hidetaka-kamigaito/; /m/manabu-okumura/; /m/masaaki-nagata/", + "bibtex": "@inproceedings{kobayashi-etal-2022-simple,\n title = \"A Simple and Strong Baseline for End-to-End Neural {RST}-style Discourse Parsing\",\n author = \"Kobayashi, Naoki and\n Hirao, Tsutomu and\n Kamigaito, Hidetaka and\n Okumura, Manabu and\n Nagata, Masaaki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.501/\",\n doi = \"10.18653/v1/2022.findings-emnlp.501\",\n pages = \"6725--6737\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.501.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.501/", + "pdf_size": 348676, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8099537369278787755&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Institute of 
Innovative Research, Tokyo Institute of Technology; NTT Communication Science Laboratories, NTT Corporation; Institute of Innovative Research, Tokyo Institute of Technology; Institute of Innovative Research, Tokyo Institute of Technology; NTT Communication Science Laboratories, NTT Corporation", + "aff_domain": "lr.pi.titech.ac.jp;hco.ntt.co.jp;lr.pi.titech.ac.jp;pi.titech.ac.jp;hco.ntt.co.jp", + "email": "lr.pi.titech.ac.jp;hco.ntt.co.jp;lr.pi.titech.ac.jp;pi.titech.ac.jp;hco.ntt.co.jp", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "Tokyo Institute of Technology;NTT Corporation", + "aff_unique_dep": "Institute of Innovative Research;Communication Science Laboratories", + "aff_unique_url": "https://www.titech.ac.jp;https://www.ntt.co.jp", + "aff_unique_abbr": "Titech;NTT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.422", + "title": "A Span-based Multimodal Variational Autoencoder for Semi-supervised Multimodal Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal named entity recognition (MNER) on social media is a challenging task which aims to extract named entities in free text and incorporate images to classify them into user-defined types. However, the annotation for named entities on social media demands a mount of human efforts. The existing semi-supervised named entity recognition methods focus on the text modal and are utilized to reduce labeling costs in traditional NER. However, the previous methods are not efficient for semi-supervised MNER. Because the MNER task is defined to combine the text information with image one and needs to consider the mismatch between the posted text and image. 
To fuse the text and image features for MNER effectively under semi-supervised setting, we propose a novel span-based multimodal variational autoencoder (SMVAE) model for semi-supervised MNER. The proposed method exploits modal-specific VAEs to model text and image latent features, and utilizes product-of-experts to acquire multimodal features. In our approach, the implicit relations between labels and multimodal features are modeled by multimodal VAE. Thus, the useful information of unlabeled data can be exploited in our method under semi-supervised setting. Experimental results on two benchmark datasets demonstrate that our approach not only outperforms baselines under supervised setting, but also improves MNER performance with less labeled data than existing semi-supervised methods.", + "author": "Baohang Zhou; Ying Zhang; Kehui Song; Wenya Guo; Guoqing Zhao; Hongbin Wang; Xiaojie Yuan", + "authorids": "/b/baohang-zhou/; /y/ying-zhang/; /k/kehui-song/; /w/wenya-guo/; /g/guoqing-zhao/; /h/hongbin-wang/; /x/xiaojie-yuan/", + "bibtex": "@inproceedings{zhou-etal-2022-span,\n title = \"A Span-based Multimodal Variational Autoencoder for Semi-supervised Multimodal Named Entity Recognition\",\n author = \"Zhou, Baohang and\n Zhang, Ying and\n Song, Kehui and\n Guo, Wenya and\n Zhao, Guoqing and\n Wang, Hongbin and\n Yuan, Xiaojie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.422/\",\n doi = \"10.18653/v1/2022.emnlp-main.422\",\n pages = \"6293--6302\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.422.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.422/", + "pdf_size": 1898410, + "gs_citation": 15, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=5494819495512805114&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "College of Computer Science, Nankai University, Tianjin, China+Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; College of Computer Science, Nankai University, Tianjin, China+Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; College of Computer Science, Nankai University, Tianjin, China+Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; College of Computer Science, Nankai University, Tianjin, China+Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; Mashang Consumer Finanace Co, Ltd; Mashang Consumer Finanace Co, Ltd; College of Computer Science, Nankai University, Tianjin, China+Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China", + "aff_domain": "dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;msxf.com;msxf.com;nankai.edu.cn", + "email": "dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;msxf.com;msxf.com;nankai.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;2;0+1", + "aff_unique_norm": "Nankai University;Tianjin Key Laboratory of Network and Data Security Technology;Mashang Consumer Finance Co., Ltd.", + "aff_unique_dep": "College of Computer Science;Network and Data Security Technology;", + "aff_unique_url": "http://www.nankai.edu.cn;;", + "aff_unique_abbr": "Nankai;;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.289", + "title": "A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect Sentiment Triplet Extraction (ASTE) is a new 
fine-grained sentiment analysis task that aims to extract triplets of aspect terms, sentiments, and opinion terms from review sentences. Recently, span-level models achieve gratifying results on ASTE task by taking advantage of the predictions of all possible spans. Since all possible spans significantly increases the number of potential aspect and opinion candidates, it is crucial and challenging to efficiently extract the triplet elements among them. In this paper, we present a span-level bidirectional network which utilizes all possible spans as input and extracts triplets from spans bidirectionally. Specifically, we devise both the aspect decoder and opinion decoder to decode the span representations and extract triples from aspect-to-opinion and opinion-to-aspect directions. With these two decoders complementing with each other, the whole network can extract triplets from spans more comprehensively. Moreover, considering that mutual exclusion cannot be guaranteed between the spans, we design a similar span separation loss to facilitate the downstream task of distinguishing the correct span by expanding the KL divergence of similar spans during the training process; in the inference process, we adopt an inference strategy to remove conflicting triplets from the results base on their confidence scores. 
Experimental results show that our framework not only significantly outperforms state-of-the-art methods, but achieves better performance in predicting triplets with multi-token entities and extracting triplets in sentences contain multi-triplets.", + "author": "Yuqi Chen; Chen Keming; Xian Sun; Zequn Zhang", + "authorids": "/y/yuqi-chen/; /c/chen-keming/; /x/xian-sun/; /z/zequn-zhang/", + "bibtex": "@inproceedings{chen-etal-2022-span,\n title = \"A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction\",\n author = \"Chen, Yuqi and\n Keming, Chen and\n Sun, Xian and\n Zhang, Zequn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.289/\",\n doi = \"10.18653/v1/2022.emnlp-main.289\",\n pages = \"4300--4309\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.289.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.289/", + "pdf_size": 2893327, + "gs_citation": 55, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10875926529627435694&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Aerospace Information Research Institute; Key Laboratory of Network Information System Technology(NIST); School of Electronic, Electrical and Communication Engineering; University of Chinese Academy of Sciences", + "aff_domain": "mails.ucas.ac.cn;hotmail.com;mail.ie.ac.cn;mail.ie.ac.cn", + "email": "mails.ucas.ac.cn;hotmail.com;mail.ie.ac.cn;mail.ie.ac.cn", + "github": "https://github.com/chen1310054465/SBN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Aerospace Information Research Institute;Key Laboratory of Network Information System Technology;School of 
Electronic, Electrical and Communication Engineering;University of Chinese Academy of Sciences", + "aff_unique_dep": ";Network Information System Technology;Electronic, Electrical and Communication Engineering;", + "aff_unique_url": "http://www.airi.cn;;;http://www.ucas.ac.cn", + "aff_unique_abbr": ";NIST;;UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.emnlp-main.315", + "title": "A Speaker-Aware Co-Attention Framework for Medical Dialogue Information Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With the development of medical digitization, the extraction and structuring of Electronic Medical Records (EMRs) have become challenging but fundamental tasks. How to accurately and automatically extract structured information from medical dialogues is especially difficult because the information needs to be inferred from complex interactions between the doctor and the patient. To this end, in this paper, we propose a speaker-aware co-attention framework for medical dialogue information extraction. To better utilize the pre-trained language representation model to perceive the semantics of the utterance and the candidate item, we develop a speaker-aware dialogue encoder with multi-task learning, which considers the speaker\u2019s identity into account. To deal with complex interactions between different utterances and the correlations between utterances and candidate items, we propose a co-attention fusion network to aggregate the utterance information. We evaluate our framework on the public medical dialogue extraction datasets to demonstrate the superiority of our method, which can outperform the state-of-the-art methods by a large margin. 
Codes will be publicly available upon acceptance.", + "author": "Yuan Xia; Zhenhui Shi; Jingbo Zhou; Jiayu Xu; Chao Lu; Yehui Yang; Lei Wang; Haifeng Huang; Xia Zhang; Junwei Liu", + "authorids": "/y/yuan-xia/; /z/zhenhui-shi/; /j/jingbo-zhou/; /j/jiayu-xu/; /c/chao-lu/; /y/yehui-yang/; /l/lei-wang/; /h/haifeng-huang/; /x/xia-zhang/; /j/junwei-liu/", + "bibtex": "@inproceedings{xia-etal-2022-speaker,\n title = \"A Speaker-Aware Co-Attention Framework for Medical Dialogue Information Extraction\",\n author = \"Xia, Yuan and\n Shi, Zhenhui and\n Zhou, Jingbo and\n Xu, Jiayu and\n Lu, Chao and\n Yang, Yehui and\n Wang, Lei and\n Huang, Haifeng and\n Zhang, Xia and\n Liu, Junwei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.315/\",\n doi = \"10.18653/v1/2022.emnlp-main.315\",\n pages = \"4777--4786\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.315.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.315/", + "pdf_size": 355734, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18118868535031286938&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Neusoft Corporation, China; Baidu Inc., China", + "aff_domain": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;neusoft.com;baidu.com", + "email": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;neusoft.com;baidu.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;0;1;0", + 
"aff_unique_norm": "Baidu Inc.;Neusoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.baidu.com;http://www.neusoft.com", + "aff_unique_abbr": "Baidu;Neusoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.58", + "title": "A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "In a live streaming chat on a video streaming service, it is crucial to filter out toxic comments with online processing to prevent users from reading comments in real-time. However, recent toxic language detection methods rely on deep learning methods, which can not be scalable considering inference speed. Also, these methods do not consider constraints of computational resources expected depending on a deployed system (e.g., no GPU resource).This paper presents an efficient method for toxic language detection that is aware of real-world scenarios. Our proposed architecture is based on partial stacking that feeds initial results with low confidence to meta-classifier. 
Experimental results show that our method achieves a much faster inference speed than BERT-based models with comparable performance.", + "author": "Yuto Oikawa; Yuki Nakayama; Koji Murakami", + "authorids": "/y/yuto-oikawa/; /y/yuki-nakayama/; /k/koji-murakami/", + "bibtex": "@inproceedings{oikawa-etal-2022-stacking,\n title = \"A Stacking-based Efficient Method for Toxic Language Detection on Live Streaming Chat\",\n author = \"Oikawa, Yuto and\n Nakayama, Yuki and\n Murakami, Koji\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.58/\",\n doi = \"10.18653/v1/2022.emnlp-industry.58\",\n pages = \"571--578\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.58.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.58/", + "pdf_size": 843953, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:kx8wZTIYHEMJ:scholar.google.com/&scioq=A+Stacking-based+Efficient+Method+for+Toxic+Language+Detection+on+Live+Streaming+Chat&hl=en&as_sdt=0,5", + "gs_version_total": 0, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.emnlp-main.414", + "title": "A Survey of Active Learning for Natural Language Processing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this work, we provide a literature review of active learning (AL) for its applications in natural language processing (NLP). In addition to a fine-grained categorization of query strategies, we also investigate several other important aspects of applying AL to NLP problems. 
These include AL for structured prediction tasks, annotation cost, model learning (especially with deep neural models), and starting and stopping AL. Finally, we conclude with a discussion of related topics and future directions.", + "author": "Zhisong Zhang; Emma Strubell; Eduard Hovy", + "authorids": "/z/zhisong-zhang/; /e/emma-strubell/; /e/eduard-hovy/", + "bibtex": "@inproceedings{zhang-etal-2022-survey,\n title = \"A Survey of Active Learning for Natural Language Processing\",\n author = \"Zhang, Zhisong and\n Strubell, Emma and\n Hovy, Eduard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.414/\",\n doi = \"10.18653/v1/2022.emnlp-main.414\",\n pages = \"6166--6190\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.414.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.414/", + "pdf_size": 374142, + "gs_citation": 119, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3708843748724193942&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cmu.edu;cmu.edu", + "email": "cs.cmu.edu;cmu.edu;cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": 
"United States" + }, + { + "id": "2022.emnlp-main.633", + "title": "A Survey of Computational Framing Analysis Approaches", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Framing analysis is predominantly qualitative and quantitative, examining a small dataset with manual coding. Easy access to digital data in the last two decades prompts scholars in both computation and social sciences to utilize various computational methods to explore frames in large-scale datasets. The growing scholarship, however, lacks a comprehensive understanding and resources of computational framing analysis methods. Aiming to address the gap, this article surveys existing computational framing analysis approaches and puts them together. The research is expected to help scholars and journalists gain a deeper understanding of how frames are being explored computationally, better equip them to analyze frames in large-scale datasets, and, finally, work on advancing methodological approaches.", + "author": "Mohammad Ali; Naeemul Hassan", + "authorids": "/m/mohammad-ali/; /n/naeemul-hassan/", + "bibtex": "@inproceedings{ali-hassan-2022-survey,\n title = \"A Survey of Computational Framing Analysis Approaches\",\n author = \"Ali, Mohammad and\n Hassan, Naeemul\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.633/\",\n doi = \"10.18653/v1/2022.emnlp-main.633\",\n pages = \"9335--9348\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.633.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.633/", + "pdf_size": 370182, + "gs_citation": 45, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=8581338644679436470&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "College of Information Studies, University of Maryland, College Park; Philip Merrill College of Journalism, College of Information Studies, University of Maryland, College Park", + "aff_domain": "umd.edu;umd.edu", + "email": "umd.edu;umd.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "College of Information Studies", + "aff_unique_url": "https://www.umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.812", + "title": "A Systematic Investigation of Commonsense Knowledge in Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language models (LMs) trained on large amounts of data have shown impressive performance on many NLP tasks under the zero-shot and few-shot setup. Here we aim to better understand the extent to which such models learn commonsense knowledge \u2014 a critical component of many NLP applications. We conduct a systematic and rigorous zero-shot and few-shot commonsense evaluation of large pre-trained LMs, where we: (i) carefully control for the LMs\u2019 ability to exploit potential surface cues and annotation artefacts, and (ii) account for variations in performance that arise from factors that are not related to commonsense knowledge. 
Our findings highlight the limitations of pre-trained LMs in acquiring commonsense knowledge without task-specific supervision; furthermore, using larger models or few-shot evaluation is insufficient to achieve human-level commonsense performance.", + "author": "Xiang Lorraine Li; Adhiguna Kuncoro; Jordan Hoffmann; Cyprien de Masson d\u2019Autume; Phil Blunsom; Aida Nematzadeh", + "authorids": "/x/xiang-lorraine-li/; /a/adhiguna-kuncoro/; /j/jordan-hoffmann/; /c/cyprien-de-masson-dautume/; /p/phil-blunsom/; /a/aida-nematzadeh/", + "bibtex": "@inproceedings{li-etal-2022-systematic,\n title = \"A Systematic Investigation of Commonsense Knowledge in Large Language Models\",\n author = \"Li, Xiang Lorraine and\n Kuncoro, Adhiguna and\n Hoffmann, Jordan and\n de Masson d{'}Autume, Cyprien and\n Blunsom, Phil and\n Nematzadeh, Aida\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.812/\",\n doi = \"10.18653/v1/2022.emnlp-main.812\",\n pages = \"11838--11855\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.812.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.812/", + "pdf_size": 2013471, + "gs_citation": 72, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7981313756844044320&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Allen Institute for Artificial Intelligence; DeepMind; Inflection AI; Reka; Cohere; University of Oxford", + "aff_domain": "allenai.org; ; ; ;google.com; ", + "email": "allenai.org; ; ; ;google.com; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;5", + "aff_unique_norm": "Allen Institute for Artificial Intelligence;DeepMind;Inflection 
AI;Reka;Cohere;University of Oxford", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://allenai.org;https://deepmind.com;https://www.inflection.ai;;https://cohere.ai;https://www.ox.ac.uk", + "aff_unique_abbr": "AI2;DeepMind;Inflection AI;;;Oxford", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;1", + "aff_country_unique": "United States;United Kingdom;" + }, + { + "id": "2022.emnlp-main.240", + "title": "A Template-based Method for Constrained Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Machine translation systems are expected to cope with various types of constraints in many practical scenarios. While neural machine translation (NMT) has achieved strong performance in unconstrained cases, it is non-trivial to impose pre-specified constraints into the translation process of NMT models. Although many approaches have been proposed to address this issue, most existing methods can not satisfy the following three desiderata at the same time: (1) high translation quality, (2) high match accuracy, and (3) low latency. In this work, we propose a template-based method that can yield results with high translation quality and match accuracy and the inference speed of our method is comparable with unconstrained NMT models. Our basic idea is to rearrange the generation of constrained and unconstrained tokens through a template. Our method does not require any changes in the model architecture and the decoding algorithm. 
Experimental results show that the proposed template-based approach can outperform several representative baselines in both lexically and structurally constrained translation tasks.", + "author": "Shuo Wang; Peng Li; Zhixing Tan; Zhaopeng Tu; Maosong Sun; Yang Liu", + "authorids": "/s/shuo-wang/; /p/peng-li/; /z/zhixing-tan/; /z/zhaopeng-tu/; /m/maosong-sun/; /y/yang-liu/", + "bibtex": "@inproceedings{wang-etal-2022-template,\n title = \"A Template-based Method for Constrained Neural Machine Translation\",\n author = \"Wang, Shuo and\n Li, Peng and\n Tan, Zhixing and\n Tu, Zhaopeng and\n Sun, Maosong and\n Liu, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.240/\",\n doi = \"10.18653/v1/2022.emnlp-main.240\",\n pages = \"3665--3679\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.240.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.240/", + "pdf_size": 2072333, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7787239980817811245&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/THUNLP-MT/Template-NMT", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.408", + "title": "A Two-Stage Approach towards Generalization in Knowledge Base Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Most existing approaches for Knowledge Base Question Answering (KBQA) focus on a specific underlying knowledge base either because of inherent assumptions in the approach, or because evaluating it on a different knowledge base requires 
non-trivial changes. However, many popular knowledge bases share similarities in their underlying schemas that can be leveraged to facilitate generalization across knowledge bases. To achieve this generalization, we introduce a KBQA framework based on a 2-stage architecture that explicitly separates semantic parsing from the knowledge base interaction, facilitating transfer learning across datasets and knowledge graphs. We show that pretraining on datasets with a different underlying knowledge base can nevertheless provide significant performance gains and reduce sample complexity. Our approach achieves comparable or state-of-the-art performance for LC-QuAD (DBpedia), WebQSP (Freebase), SimpleQuestions (Wikidata) and MetaQA (Wikimovies-KG).", + "author": "Srinivas Ravishankar; Dung Thai; Ibrahim Abdelaziz; Nandana Mihindukulasooriya; Tahira Naseem; Pavan Kapanipathi; Gaetano Rossiello; Achille Fokoue", + "authorids": "/s/srinivas-ravishankar/; /d/dung-thai/; /i/ibrahim-abdelaziz/; /n/nandana-mihindukulasooriya/; /t/tahira-naseem/; /p/pavan-kapanipathi/; /g/gaetano-rossiello/; /a/achille-fokoue-nkoutche/", + "bibtex": "@inproceedings{ravishankar-etal-2022-two,\n title = \"A Two-Stage Approach towards Generalization in Knowledge Base Question Answering\",\n author = \"Ravishankar, Srinivas and\n Thai, Dung and\n Abdelaziz, Ibrahim and\n Mihindukulasooriya, Nandana and\n Naseem, Tahira and\n Kapanipathi, Pavan and\n Rossiello, Gaetano and\n Fokoue, Achille\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.408/\",\n doi = \"10.18653/v1/2022.findings-emnlp.408\",\n pages = \"5571--5580\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.408.pdf", + 
"site": "https://aclanthology.org/2022.findings-emnlp.408/", + "pdf_size": 475304, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10773241285885083940&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.277", + "title": "A Unified Dialogue User Simulator for Few-shot Data Augmentation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained language models have shown superior performance in task-oriented dialogues. However, existing datasets are on limited scales, which cannot support large-scale pre-training. Fortunately, various data augmentation methods have been developed to augment large-scale task-oriented dialogue corpora. However, they heavily rely on annotated data in the target domain, which require a tremendous amount of data collection and human labeling work. In this paper, we build a unified dialogue user simulation model by pre-training on several publicly available datasets. The model can then be tuned on a target domain with few-shot data. 
The experiments on a target dataset across multiple domains show that our proposed model brings remarkable performance increases through data augmentation.", + "author": "Dazhen Wan; Zheng Zhang; Qi Zhu; Lizi Liao; Minlie Huang", + "authorids": "/d/dazhen-wan/; /z/zheng-zhang/; /q/qi-zhu/; /l/lizi-liao/; /m/minlie-huang/", + "bibtex": "@inproceedings{wan-etal-2022-unified,\n title = \"A Unified Dialogue User Simulator for Few-shot Data Augmentation\",\n author = \"Wan, Dazhen and\n Zhang, Zheng and\n Zhu, Qi and\n Liao, Lizi and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.277/\",\n doi = \"10.18653/v1/2022.findings-emnlp.277\",\n pages = \"3788--3799\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.277.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.277/", + "pdf_size": 308190, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1425863453324884630&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "CoAI Group, DCST, IAI, BNRIST, Tsinghua University; CoAI Group, DCST, IAI, BNRIST, Tsinghua University; CoAI Group, DCST, IAI, BNRIST, Tsinghua University; Singapore Management University; CoAI Group, DCST, IAI, BNRIST, Tsinghua University", + "aff_domain": "tsinghua.org.cn;gmail.com;gmail.com;smu.edu.sg;tsinghua.edu.cn", + "email": "tsinghua.org.cn;gmail.com;gmail.com;smu.edu.sg;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Tsinghua University;Singapore Management University", + "aff_unique_dep": "CoAI Group, DCST, IAI, BNRIST;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;https://www.smu.edu.sg", + "aff_unique_abbr": "THU;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.43", + "title": "A Unified Encoder-Decoder Framework with Entity Memory", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entities, as important carriers of real-world knowledge, play a key role in many NLP tasks.We focus on incorporating entity knowledge into an encoder-decoder framework for informative text generation. Existing approaches tried to index, retrieve, and read external documents as evidence, but they suffered from a large computational overhead. In this work, we propose an encoder-decoder framework with an entity memory, namely EDMem. The entity knowledge is stored in the memory as latent representations, and the memory is pre-trained on Wikipedia along with encoder-decoder parameters. To precisely generate entity names, we design three decoding methods to constrain entity generation by linking entities in the memory. EDMem is a unified framework that can be used on various entity-intensive question answering and generation tasks. 
Extensive experimental results show that EDMem outperforms both memory-based auto-encoder models and non-memory encoder-decoder models.", + "author": "Zhihan Zhang; Wenhao Yu; Chenguang Zhu; Meng Jiang", + "authorids": "/z/zhihan-zhang/; /w/wenhao-yu/; /c/chenguang-zhu/; /m/meng-jiang/", + "bibtex": "@inproceedings{zhang-etal-2022-unified,\n title = \"A Unified Encoder-Decoder Framework with Entity Memory\",\n author = \"Zhang, Zhihan and\n Yu, Wenhao and\n Zhu, Chenguang and\n Jiang, Meng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.43/\",\n doi = \"10.18653/v1/2022.emnlp-main.43\",\n pages = \"689--705\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.43.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.43/", + "pdf_size": 448251, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16416402625505925604&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff": "University of Notre Dame, Notre Dame, IN, USA; University of Notre Dame, Notre Dame, IN, USA; Microsoft Cognitive Services Research, Redmond, WA, USA; University of Notre Dame, Notre Dame, IN, USA", + "aff_domain": "nd.edu;nd.edu;microsoft.com;nd.edu", + "email": "nd.edu;nd.edu;microsoft.com;nd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Notre Dame;Microsoft", + "aff_unique_dep": ";Cognitive Services Research", + "aff_unique_url": "https://www.nd.edu;https://www.microsoft.com", + "aff_unique_abbr": "Notre Dame;Microsoft", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Notre Dame;Redmond", + "aff_country_unique_index": 
"0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.237", + "title": "A Unified Framework for Pun Generation with Humor Principles", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose a unified framework to generate both homophonic and homographic puns to resolve the split-up in existing works. Specifically, we incorporate three linguistic attributes of puns to the language models: ambiguity, distinctiveness, and surprise. Our framework consists of three parts: 1) a context words/phrases selector to promote the aforementioned attributes, 2) a generation model trained on non-pun sentences to incorporate the context words/phrases into the generation output, and 3) a label predictor that learns the structure of puns which is used to steer the generation model at inference time. Evaluation results on both pun types demonstrate the efficacy of our model over strong baselines.", + "author": "Yufei Tian; Divyanshu Sheth; Nanyun Peng", + "authorids": "/y/yufei-tian/; /d/divyanshu-sheth/; /n/nanyun-peng/", + "bibtex": "@inproceedings{tian-etal-2022-unified,\n title = \"A Unified Framework for Pun Generation with Humor Principles\",\n author = \"Tian, Yufei and\n Sheth, Divyanshu and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.237/\",\n doi = \"10.18653/v1/2022.findings-emnlp.237\",\n pages = \"3253--3261\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.237.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.237/", + "pdf_size": 386974, + "gs_citation": 14, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7981599476226744481&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/PlusLabNLP/Unified_PunGen", + "project": "", + "author_num": 3 + }, + { + "id": "2022.emnlp-main.504", + "title": "A Unified Neural Network Model for Readability Assessment with Feature Projection and Length-Balanced Loss", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Readability assessment is a basic research task in the field of education. Traditional methods mainly employ machine learning classifiers with hundreds of linguistic features. Although the deep learning model has become the prominent approach for almost all NLP tasks, it is less explored for readability assessment. In this paper, we propose a BERT-based model with feature projection and length-balanced loss (BERT-FP-LBL) to determine the difficulty level of a given text. First, we introduce topic features guided by difficulty knowledge to complement the traditional linguistic features. From the linguistic features, we extract really useful orthogonal features to supplement BERT representations by means of projection filtering. Furthermore, we design a length-balanced loss to handle the greatly varying length distribution of the readability data. We conduct experiments on three English benchmark datasets and one Chinese dataset, and the experimental results show that our proposed model achieves significant improvements over baseline models. 
Interestingly, our proposed model achieves comparable results with human experts in consistency test.", + "author": "Wenbiao Li; Wang Ziyang; Yunfang Wu", + "authorids": "/w/wenbiao-li/; /w/wang-ziyang/; /y/yunfang-wu/", + "bibtex": "@inproceedings{li-etal-2022-unified,\n title = \"A Unified Neural Network Model for Readability Assessment with Feature Projection and Length-Balanced Loss\",\n author = \"Li, Wenbiao and\n Ziyang, Wang and\n Wu, Yunfang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.504/\",\n doi = \"10.18653/v1/2022.emnlp-main.504\",\n pages = \"7446--7457\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.504.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.504/", + "pdf_size": 3905666, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3257643240466628779&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University + School of Software and Microelectronics, Peking University, Beijing, China; MOE Key Laboratory of Computational Linguistics, Peking University + School of Software and Microelectronics, Peking University, Beijing, China; School of Computer Science, Peking University, Beijing, China", + "aff_domain": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": 
"1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.276", + "title": "A Unified Positive-Unlabeled Learning Framework for Document-Level Relation Extraction with Different Levels of Labeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Document-level relation extraction (RE) aims to identify relations between entities across multiple sentences. Most previous methods focused on document-level RE under full supervision. However, in real-world scenario, it is expensive and difficult to completely label all relations in a document because the number of entity pairs in document-level RE grows quadratically with the number of entities. To solve the common incomplete labeling problem, we propose a unified positive-unlabeled learning framework - shift and squared ranking loss positive-unlabeled (SSR-PU) learning. We use positive-unlabeled (PU) learning on document-level RE for the first time. Considering that labeled data of a dataset may lead to prior shift of unlabeled data, we introduce a PU learning under prior shift of training data. Also, using none-class score as an adaptive threshold, we propose squared ranking loss and prove its Bayesian consistency with multi-label ranking metrics. Extensive experiments demonstrate that our method achieves an improvement of about 14 F1 points relative to the previous baseline with incomplete labeling. 
In addition, it outperforms previous state-of-the-art results under both fully supervised and extremely unlabeled settings as well.", + "author": "Ye Wang; Xinxin Liu; Wenxin Hu; Tao Zhang", + "authorids": "/y/ye-wang/; /x/xinxin-liu/; /w/wenxin-hu/; /t/tao-zhang/", + "bibtex": "@inproceedings{wang-etal-2022-unified,\n title = \"A Unified Positive-Unlabeled Learning Framework for Document-Level Relation Extraction with Different Levels of Labeling\",\n author = \"Wang, Ye and\n Liu, Xinxin and\n Hu, Wenxin and\n Zhang, Tao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.276/\",\n doi = \"10.18653/v1/2022.emnlp-main.276\",\n pages = \"4123--4135\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.276.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.276/", + "pdf_size": 627279, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3031154056637059037&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "East China Normal University; East China Normal University; East China Normal University; Tsinghua University", + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;cc.ecnu.edu.cn;mails.tsinghua.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;cc.ecnu.edu.cn;mails.tsinghua.edu.cn", + "github": "https://github.com/www-Ye/SSR-PU", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "East China Normal University;Tsinghua University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "ECNU;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.579", + "title": "ACENet: Attention Guided Commonsense Reasoning on Hybrid Knowledge Graph", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Augmenting pre-trained language models (PLMs) with knowledge graphs (KGs) has demonstrated superior performance on commonsense reasoning. Given a commonsense based QA context (question and multiple choices), existing approaches usually estimate the plausibility of candidate choices separately based on their respective retrieved KGs, without considering the interference among different choices. In this paper, we propose an Attention guided Commonsense rEasoning Network (ACENet) to endow the neural network with the capability of integrating hybrid knowledge. Specifically, our model applies the multi-layer interaction of answer choices to continually strengthen correct choice information and guide the message passing of GNN. In addition, we also design a mix attention mechanism of nodes and edges to iteratively select supporting evidence on hybrid knowledge graph. 
Experimental results demonstrate the effectiveness of our proposed model through considerable performance gains across CommonsenseQA and OpenbookQA datasets.", + "author": "Chuzhan Hao; Minghui Xie; Peng Zhang", + "authorids": "/c/chuzhan-hao/; /m/minghui-xie/; /p/peng-zhang/", + "bibtex": "@inproceedings{hao-etal-2022-acenet,\n title = \"{ACEN}et: Attention Guided Commonsense Reasoning on Hybrid Knowledge Graph\",\n author = \"Hao, Chuzhan and\n Xie, Minghui and\n Zhang, Peng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.579/\",\n doi = \"10.18653/v1/2022.emnlp-main.579\",\n pages = \"8461--8471\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.579.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.579/", + "pdf_size": 2471942, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=417733313422664728&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University", + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn", + "github": "https://github.com/HAOChuzhan/ACENet", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tianjin University", + "aff_unique_dep": "College of Intelligence and Computing", + "aff_unique_url": "http://www.tju.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.440", + 
"title": "ADDMU: Detection of Far-Boundary Adversarial Examples with Data and Model Uncertainty Estimation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Adversarial Examples Detection (AED) is a crucial defense technique against adversarial attacks and has drawn increasing attention from the Natural Language Processing (NLP) community. Despite the surge of new AED methods, our studies show that existing methods heavily rely on a shortcut to achieve good performance. In other words, current search-based adversarial attacks in NLP stop once model predictions change, and thus most adversarial examples generated by those attacks are located near model decision boundaries. To surpass this shortcut and fairly evaluate AED methods, we propose to test AED methods with Far Boundary (FB) adversarial examples. Existing methods show worse than random guess performance under this scenario. To overcome this limitation, we propose a new technique, ADDMU, adversary detection with data and model uncertainty, which combines two types of uncertainty estimation for both regular and FB adversarial example detection. Our new method outperforms previous methods by 3.6 and 6.0 AUC points under each scenario. 
Finally, our analysis shows that the two types of uncertainty provided by ADDMU can be leveraged to characterize adversarialexamples and identify the ones that contribute most to model\u2019s robustness in adversarial training.", + "author": "Fan Yin; Yao Li; Cho-Jui Hsieh; Kai-Wei Chang", + "authorids": "/f/fan-yin/; /y/yao-li/; /c/cho-jui-hsieh/; /k/kai-wei-chang/", + "bibtex": "@inproceedings{yin-etal-2022-addmu,\n title = \"{ADDMU}: Detection of Far-Boundary Adversarial Examples with Data and Model Uncertainty Estimation\",\n author = \"Yin, Fan and\n Li, Yao and\n Hsieh, Cho-Jui and\n Chang, Kai-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.440/\",\n doi = \"10.18653/v1/2022.emnlp-main.440\",\n pages = \"6567--6584\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.440.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.440/", + "pdf_size": 499716, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13491447829375104276&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff": "University of California, Los Angeles; University of North Carolina, Chapel Hill; University of California, Los Angeles; University of California, Los Angeles", + "aff_domain": "cs.ucla.edu;email.unc.edu;cs.ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu;email.unc.edu;cs.ucla.edu;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of California, Los Angeles;University of North Carolina", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucla.edu;https://www.unc.edu", + "aff_unique_abbr": "UCLA;UNC", + 
"aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Los Angeles;Chapel Hill", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.343", + "title": "AEG: Argumentative Essay Generation via A Dual-Decoder Model with Content Planning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Argument generation is an important but challenging task in computational argumentation.Existing studies have mainly focused on generating individual short arguments, while research on generating long and coherent argumentative essays is still under-explored.In this paper, we propose a new task, Argumentative Essay Generation (AEG).Given a writing prompt, the goal of AEG is to automatically generate an argumentative essay with strong persuasiveness.We construct a large-scale dataset, ArgEssay, for this new task and establish a strong model based on a dual-decoder Transformer architecture.Our proposed model contains two decoders, a planning decoder (PD) and a writing decoder (WD), where PD is used to generate a sequence for essay content planning and WD incorporates the planning information to write an essay.Further, we pre-train this model on a large news dataset to enhance the plan-and-write paradigm.Automatic and human evaluation results show that our model can generate more coherent and persuasive essays with higher diversity and less repetition compared to several baselines.", + "author": "Jianzhu Bao; Yasheng Wang; Yitong Li; Fei Mi; Ruifeng Xu", + "authorids": "/j/jianzhu-bao/; /y/yasheng-wang/; /y/yitong-li/; /f/fei-mi/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{bao-etal-2022-aeg,\n title = \"{AEG}: Argumentative Essay Generation via A Dual-Decoder Model with Content Planning\",\n author = \"Bao, Jianzhu and\n Wang, Yasheng and\n Li, Yitong and\n Mi, Fei and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 
2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.343/\",\n doi = \"10.18653/v1/2022.emnlp-main.343\",\n pages = \"5134--5148\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.343.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.343/", + "pdf_size": 523029, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5150474844344206937&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab+Huawei Technologies Co., Ltd.; Huawei Noah\u2019s Ark Lab; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "gmail.com;huawei.com;huawei.com;huawei.com;hit.edu.cn", + "email": "gmail.com;huawei.com;huawei.com;huawei.com;hit.edu.cn", + "github": "https://github.com/HITSZ-HLT/AEG", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+2;3;3+4;3;0+1+2", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Peng Cheng Laboratory;Huawei;Huawei Technologies", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;Noah\u2019s Ark Lab;", + "aff_unique_url": "http://en.hhit.edu.cn/;;;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "HIT;;;Huawei;Huawei", + "aff_campus_unique_index": "0+0;;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0+0;0;0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": 
"2022.emnlp-main.636", + "title": "ALFRED-L: Investigating the Role of Language for Action Learning in Interactive Visual Environments", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Embodied Vision and Language Task Completion requires an embodied agent to interpret natural language instructions and egocentric visual observations to navigate through and interact with environments. In this work, we examine ALFRED, a challenging benchmark for embodied task completion, with the goal of gaining insight into how effectively models utilize language. We find evidence that sequence-to-sequence and transformer-based models trained on this benchmark are not sufficiently sensitive to changes in input language instructions. Next, we construct a new test split \u2013 ALFRED-L to test whether ALFRED models can generalize to task structures not seen during training that intuitively require the same types of language understanding required in ALFRED. Evaluation of existing models on ALFRED-L suggests that (a) models are overly reliant on the sequence in which objects are visited in typical ALFRED trajectories and fail to adapt to modifications of this sequence and (b) models trained with additional augmented trajectories are able to adapt relatively better to such changes in input language instructions.", + "author": "Arjun Akula; Spandana Gella; Aishwarya Padmakumar; Mahdi Namazifar; Mohit Bansal; Jesse Thomason; Dilek Hakkani-Tur", + "authorids": "/a/arjun-akula/; /s/spandana-gella/; /a/aishwarya-padmakumar/; /m/mahdi-namazifar/; /m/mohit-bansal/; /j/jesse-thomason/; /d/dilek-hakkani-tur/", + "bibtex": "@inproceedings{akula-etal-2022-alfred,\n title = \"{ALFRED}-{L}: Investigating the Role of Language for Action Learning in Interactive Visual Environments\",\n author = \"Akula, Arjun and\n Gella, Spandana and\n Padmakumar, Aishwarya and\n Namazifar, Mahdi and\n Bansal, Mohit and\n Thomason, Jesse and\n Hakkani-Tur, Dilek\",\n editor = \"Goldberg, Yoav 
and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.636/\",\n doi = \"10.18653/v1/2022.emnlp-main.636\",\n pages = \"9369--9378\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.636.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.636/", + "pdf_size": 913145, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1578159425216940656&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "Google AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; University of North Carolina at Chapel Hill + Amazon Alexa AI; University of Southern California + Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "google.com;amazon.com;amazon.com;amazon.com;cs.unc.edu;usc.edu;amazon.com", + "email": "google.com;amazon.com;amazon.com;amazon.com;cs.unc.edu;usc.edu;amazon.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;2+1;3+1;1", + "aff_unique_norm": "Google;Amazon;University of North Carolina;University of Southern California", + "aff_unique_dep": "Google AI;Alexa AI;;", + "aff_unique_url": "https://ai.google;https://www.amazon.com;https://www.unc.edu;https://www.usc.edu", + "aff_unique_abbr": "Google AI;Amazon;UNC;USC", + "aff_campus_unique_index": "0;2;3", + "aff_campus_unique": "Mountain View;;Chapel Hill;Los Angeles", + "aff_country_unique_index": "0;0;0;0;0+0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.709", + "title": "AMAL: Meta Knowledge-Driven Few-Shot Adapter Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "NLP has advanced greatly together with the proliferation of Transformer-based pre-trained language models. 
To adapt to a downstream task, the pre-trained language models need to be fine-tuned with a sufficient supply of annotated examples. In recent years, Adapter-based fine-tuning methods have expanded the applicability of pre-trained language models by substantially lowering the required amount of annotated examples. However, existing Adapter-based methods still fail to yield meaningful results in the few-shot regime where only a few annotated examples are provided. In this study, we present a meta-learning-driven low-rank adapter pooling method, called AMAL, for leveraging pre-trained language models even with just a few data points. We evaluate our method on five text classification benchmark datasets. The results show that AMAL significantly outperforms previous few-shot learning methods and achieves a new state-of-the-art.", + "author": "S. K. Hong; Tae Young Jang", + "authorids": "/s/s-k-hong/; /t/tae-young-jang/", + "bibtex": "@inproceedings{hong-jang-2022-amal,\n title = \"{AMAL}: Meta Knowledge-Driven Few-Shot Adapter Learning\",\n author = \"Hong, S. K. 
and\n Jang, Tae Young\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.709/\",\n doi = \"10.18653/v1/2022.emnlp-main.709\",\n pages = \"10381--10389\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.709.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.709/", + "pdf_size": 873538, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3802110576888159836&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Samsung SDS; Samsung SDS", + "aff_domain": "samsung.com;samsung.com", + "email": "samsung.com;samsung.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Samsung SDS", + "aff_unique_dep": "", + "aff_unique_url": "https://www.samsungsds.com", + "aff_unique_abbr": "Samsung SDS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.525", + "title": "APEACH: Attacking Pejorative Expressions with Analysis on Crowd-Generated Hate Speech Evaluation Datasets", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In hate speech detection, developing training and evaluation datasets across various domains is the critical issue. Whereas, major approaches crawl social media texts and hire crowd-workers to annotate the data. Following this convention often restricts the scope of pejorative expressions to a single domain lacking generalization. 
Sometimes domain overlap between training corpus and evaluation set overestimate the prediction performance when pretraining language models on low-data language. To alleviate these problems in Korean, we propose APEACH that asks unspecified users to generate hate speech examples followed by minimal post-labeling. We find that APEACH can collect useful datasets that are less sensitive to the lexical overlaps between the pretraining corpus and the evaluation set, thereby properly measuring the model performance.", + "author": "Kichang Yang; Wonjun Jang; Won Ik Cho", + "authorids": "/k/kichang-yang/; /w/wonjun-jang/; /w/won-ik-cho/", + "bibtex": "@inproceedings{yang-etal-2022-apeach,\n title = \"{APEACH}: Attacking Pejorative Expressions with Analysis on Crowd-Generated Hate Speech Evaluation Datasets\",\n author = \"Yang, Kichang and\n Jang, Wonjun and\n Cho, Won Ik\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.525/\",\n doi = \"10.18653/v1/2022.findings-emnlp.525\",\n pages = \"7076--7086\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.525.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.525/", + "pdf_size": 1790539, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4110277834543603949&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Kakao Corp. + Kakao Enterprise Corp. + School of Software, Soongsil University; Kakao Corp. + School of Software, Soongsil University; Dept. 
of ECE, Seoul National University", + "aff_domain": "gmail.com;kakaocorp.com;snu.ac.kr", + "email": "gmail.com;kakaocorp.com;snu.ac.kr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+2;0+2;3", + "aff_unique_norm": "Kakao Corp.;Kakao Enterprise Corp.;Soongsil University;Seoul National University", + "aff_unique_dep": ";;School of Software;Dept. of Electrical and Computer Engineering", + "aff_unique_url": "https://www.kakao.com;https://www.kakaoenterprisecorp.com;https://www.soongsil.ac.kr;https://www.snu.ac.kr", + "aff_unique_abbr": "Kakao;KEC;Soongsil;SNU", + "aff_campus_unique_index": ";;1", + "aff_campus_unique": ";Seoul", + "aff_country_unique_index": "0+0+0;0+0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.62", + "title": "ARTIST: A Transformer-based Chinese Text-to-Image Synthesizer Digesting Linguistic and World Knowledge", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text-to-Image Synthesis (TIS) is a popular task to convert natural language texts into realistic images. Recently, transformer-based TIS models (such as DALL-E) have been proposed using the encoder-decoder architectures. Yet, these billion-scale TIS models are difficult to tune and deploy in resource-constrained environments. In addition, there is a lack of language-specific TIS benchmarks for Chinese, together with high-performing models with moderate sizes. In this work, we present ARTIST, A tRansformer-based Chinese Text-to-Image SynThesizer for high-resolution image generation. In ARTIST, the rich linguistic and relational knowledge facts are injected into the model to ensure better model performance without the usage of ultra-large models. 
We further establish a large-scale Chinese TIS benchmark with the re-production results of state-of-the-art transformer-based TIS models.Results show ARTIST outperforms previous approaches.", + "author": "Tingting Liu; Chengyu Wang; Xiangru Zhu; Lei Li; Minghui Qiu; Jun Huang; Ming Gao; Yanghua Xiao", + "authorids": "/t/tingting-liu/; /c/chengyu-wang/; /x/xiangru-zhu/; /l/lei-li/; /m/minghui-qiu/; /j/jun-huang/; /m/ming-gao/; /y/yanghua-xiao/", + "bibtex": "@inproceedings{liu-etal-2022-artist,\n title = \"{ARTIST}: A Transformer-based {C}hinese Text-to-Image Synthesizer Digesting Linguistic and World Knowledge\",\n author = \"Liu, Tingting and\n Wang, Chengyu and\n Zhu, Xiangru and\n Li, Lei and\n Qiu, Minghui and\n Huang, Jun and\n Gao, Ming and\n Xiao, Yanghua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.62/\",\n doi = \"10.18653/v1/2022.findings-emnlp.62\",\n pages = \"881--888\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.62.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.62/", + "pdf_size": 1358836, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16833373895619879094&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/alibaba/EasyNLP", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.136", + "title": "ASDOT: Any-Shot Data-to-Text Generation with Pretrained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Data-to-text generation is challenging due to the great variety of the input data in terms of domains (e.g., 
finance vs sports) or schemata (e.g., diverse predicates). Recent end-to-end neural methods thus require substantial training examples to learn to disambiguate and describe the data. Yet, real-world data-to-text problems often suffer from various data-scarce issues: one may have access to only a handful of or no training examples, and/or have to rely on examples in a different domain or schema. To fill this gap, we propose Any-Shot Data-to-Text (ASDOT), a new approach flexibly applicable to diverse settings by making efficient use of any given (or no) examples. ASDOT consists of two steps, data disambiguation and sentence fusion, both of which are amenable to be solved with off-the-shelf pretrained language models (LMs) with optional finetuning. In the data disambiguation stage, we employ the prompted GPT-3 model to understand possibly ambiguous triples from the input data and convert each into a short sentence with reduced ambiguity. The sentence fusion stage then uses an LM like T5 to fuse all the resulting sentences into a coherent paragraph as the final description. We evaluate extensively on various datasets in different scenarios, including the zero-/few-/full-shot settings, and generalization to unseen predicates and out-of-domain data. 
Experimental results show that ASDOT consistently achieves significant improvement over baselines, e.g., a 30.81 BLEU gain on the DART dataset under the zero-shot setting.", + "author": "Jiannan Xiang; Zhengzhong Liu; Yucheng Zhou; Eric Xing; Zhiting Hu", + "authorids": "/j/jiannan-xiang/; /z/zhengzhong-liu/; /y/yucheng-zhou/; /e/eric-xing/; /z/zhiting-hu/", + "bibtex": "@inproceedings{xiang-etal-2022-asdot,\n title = \"{ASDOT}: Any-Shot Data-to-Text Generation with Pretrained Language Models\",\n author = \"Xiang, Jiannan and\n Liu, Zhengzhong and\n Zhou, Yucheng and\n Xing, Eric and\n Hu, Zhiting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.136/\",\n doi = \"10.18653/v1/2022.findings-emnlp.136\",\n pages = \"1886--1899\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.136.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.136/", + "pdf_size": 522696, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14472032080501097924&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Carnegie Mellon University+Petuum Inc.+Mohamed Bin Zayed University of Artificial Intelligence; Carnegie Mellon University+Petuum Inc.+Mohamed Bin Zayed University of Artificial Intelligence; UC San Diego; Carnegie Mellon University+Petuum Inc.+Mohamed Bin Zayed University of Artificial Intelligence; UC San Diego", + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;ucsd.edu;andrew.cmu.edu;ucsd.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;ucsd.edu;andrew.cmu.edu;ucsd.edu", + "github": "https://github.com/szxiangjn/any-shot-data2text", + "project": "", + "author_num": 5, + "aff_unique_index": 
"0+1+2;0+1+2;3;0+1+2;3", + "aff_unique_norm": "Carnegie Mellon University;Petuum Inc.;Mohamed Bin Zayed University of Artificial Intelligence;University of California, San Diego", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cmu.edu;https://www.petuum.com;https://www.mbzuai.ac.ae;https://www.ucsd.edu", + "aff_unique_abbr": "CMU;;MBZUAI;UCSD", + "aff_campus_unique_index": ";;1;;1", + "aff_campus_unique": ";San Diego", + "aff_country_unique_index": "0+0+1;0+0+1;0;0+0+1;0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.566", + "title": "ASQA: Factoid Questions Meet Long-Form Answers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent progress on open domain factoid question answering (QA) does not easily transfer to the task of long-form QA, where the goal is to answer questions that require in-depth explanations. The hurdles include a lack of high-quality data and the absence of a well-defined notion of an answer\u2019s quality. In this work, we address these problems by releasing a novel dataset and a task that we call ASQA (Answer Summaries for Questions which are Ambiguous); and proposing a reliable metric for measuring performance on ASQA. Our task focuses on ambiguous factoid questions which have different correct answers depending on the interpretation. Answers to ambiguous questions should combine factual information from multiple sources into a coherent long-form summary that resolves the ambiguity. In contrast to existing long-form QA tasks (such as ELI5), ASQA admits a clear notion of correctness: a user faced with a good summary should be able to answer different interpretations of the original ambiguous question. 
Our analysis demonstrates an agreement between this metric and human judgments, and reveals a considerable gap between human performance and strong baselines.", + "author": "Ivan Stelmakh; Yi Luan; Bhuwan Dhingra; Ming-Wei Chang", + "authorids": "/i/ivan-stelmakh/; /y/yi-luan/; /b/bhuwan-dhingra/; /m/ming-wei-chang/", + "bibtex": "@inproceedings{stelmakh-etal-2022-asqa,\n title = \"{ASQA}: Factoid Questions Meet Long-Form Answers\",\n author = \"Stelmakh, Ivan and\n Luan, Yi and\n Dhingra, Bhuwan and\n Chang, Ming-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.566/\",\n doi = \"10.18653/v1/2022.emnlp-main.566\",\n pages = \"8273--8288\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.566.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.566/", + "pdf_size": 917059, + "gs_citation": 170, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14540487061645953393&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "Yakov & Partners; Duke University; Google Research; Google Research", + "aff_domain": "icloud.com;google.com;google.com;google.com", + "email": "icloud.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2", + "aff_unique_norm": "Yakov & Partners;Duke University;Google", + "aff_unique_dep": ";;Google Research", + "aff_unique_url": ";https://www.duke.edu;https://research.google", + "aff_unique_abbr": ";Duke;Google Research", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "1;1;1", + "aff_country_unique": ";United States" + }, + { + "id": "2022.emnlp-main.446", + 
"title": "ATTEMPT: Parameter-Efficient Multi-task Tuning via Attentional Mixtures of Soft Prompts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This work introduces a new multi-task, parameter-efficient language model (LM) tuning method that learns to transfer knowledge across different tasks via a mixture of soft prompts\u2014small prefix embedding vectors pre-trained for different tasks. Our method, called ATTEMPT (ATTEntional Mixtures of Prompt Tuning), obtains source prompts as encodings of large-scale source tasks into a small number of parameters and trains an attention module to interpolate the source prompts and a newly initialized target prompt for every instance in the target task. During training, only the target task prompt and the attention weights, which are shared between tasks in multi-task training, are updated, while the original LM and source prompts are intact. ATTEMPT is highly parameter-efficient (e.g., updates 2,300 times fewer parameters than full fine-tuning), while it overcomes instability of prompt tuning and achieves high task performance using learned knowledge from high-resource tasks. Moreover, it is modular using pre-trained soft prompts, and can flexibly add or remove source prompts for effective knowledge transfer. Our experimental results across 21 diverse NLP datasets show that ATTEMPT significantly outperforms prompt tuning and outperforms or matches fully fine-tuned or other parameter-efficient tuning approaches that use 10 times more parameters. 
Finally, ATTEMPT outperforms previous work in few-shot learning settings.", + "author": "Akari Asai; Mohammadreza Salehi; Matthew Peters; Hannaneh Hajishirzi", + "authorids": "/a/akari-asai/; /m/mohammadreza-salehi/; /m/matthew-e-peters/; /h/hannaneh-hajishirzi/", + "bibtex": "@inproceedings{asai-etal-2022-attempt,\n title = \"{ATTEMPT}: Parameter-Efficient Multi-task Tuning via Attentional Mixtures of Soft Prompts\",\n author = \"Asai, Akari and\n Salehi, Mohammadreza and\n Peters, Matthew and\n Hajishirzi, Hannaneh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.446/\",\n doi = \"10.18653/v1/2022.emnlp-main.446\",\n pages = \"6655--6672\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.446.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.446/", + "pdf_size": 653381, + "gs_citation": 107, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9345696559280297687&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Washington\u2661; University of Washington\u2661; Allen Institute for AI\u2662; University of Washington\u2661\u2662", + "aff_domain": "cs.washington.edu;cs.washington.edu;allenai.org;cs.washington.edu", + "email": "cs.washington.edu;cs.washington.edu;allenai.org;cs.washington.edu", + "github": "https://github.com/AkariAsai/ATTEMPT", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Washington;Allen Institute for AI", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.washington.edu;https://allenai.org", + "aff_unique_abbr": "UW;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.412", + "title": "AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect Based Sentiment Analysis is a dominant research area with potential applications in social media analytics, business, finance, and health. Prior works in this area are primarily based on supervised methods, with a few techniques using weak supervision limited to predicting a single aspect category per review sentence. In this paper, we present an extremely weakly supervised multi-label Aspect Category Sentiment Analysis framework which does not use any labelled data. We only rely on a single word per class as an initial indicative information. We further propose an automatic word selection technique to choose these seed categories and sentiment words. We explore unsupervised language model post-training to improve the overall performance, and propose a multi-label generator model to generate multiple aspect category-sentiment pairs per review sentence. 
Experiments conducted on four benchmark datasets showcase our method to outperform other weakly supervised baselines by a significant margin.", + "author": "Sabyasachi Kamila; Walid Magdy; Sourav Dutta; MingXue Wang", + "authorids": "/s/sabyasachi-kamila/; /w/walid-magdy/; /s/sourav-dutta/; /m/mingxue-wang/", + "bibtex": "@inproceedings{kamila-etal-2022-ax,\n title = \"{AX}-{MABSA}: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\",\n author = \"Kamila, Sabyasachi and\n Magdy, Walid and\n Dutta, Sourav and\n Wang, MingXue\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.412/\",\n doi = \"10.18653/v1/2022.emnlp-main.412\",\n pages = \"6136--6147\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.412.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.412/", + "pdf_size": 489351, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8273858126721599490&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh; Huawei Research Centre, Dublin, Ireland; Huawei Research Centre, Dublin, Ireland", + "aff_domain": "inf.ed.ac.uk;inf.ed.ac.uk;huawei.com;huawei.com", + "email": "inf.ed.ac.uk;inf.ed.ac.uk;huawei.com;huawei.com", + "github": "https://github.com/sabyasachi-kamila/AX-MABSA", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "University of Edinburgh;Huawei Research Centre", + "aff_unique_dep": "School of Informatics;", + "aff_unique_url": "https://www.ed.ac.uk;https://www.huawei.com", + "aff_unique_abbr": 
"Edinburgh;Huawei", + "aff_campus_unique_index": "0;0;1;1", + "aff_campus_unique": "Edinburgh;Dublin", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "United Kingdom;Ireland" + }, + { + "id": "2022.emnlp-main.38", + "title": "Abstract Visual Reasoning with Tangram Shapes", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce KiloGram, a resource for studying abstract visual reasoning in humans and machines. Drawing on the history of tangram puzzles as stimuli in cognitive science, we build a richly annotated dataset that, with >1k distinct stimuli, is orders of magnitude larger and more diverse than prior resources. It is both visually and linguistically richer, moving beyond whole shape descriptions to include segmentation maps and part labels. We use this resource to evaluate the abstract visual reasoning capacities of recent multi-modal models. We observe that pre-trained weights demonstrate limited abstract reasoning, which dramatically improves with fine-tuning. 
We also observe that explicitly describing parts aids abstract reasoning for both humans and models, especially when jointly encoding the linguistic and visual inputs.", + "author": "Anya Ji; Noriyuki Kojima; Noah Rush; Alane Suhr; Wai Keen Vong; Robert Hawkins; Yoav Artzi", + "authorids": "/a/anya-ji/; /n/noriyuki-kojima/; /n/noah-rush/; /a/alane-suhr/; /w/wai-keen-vong/; /r/robert-hawkins/; /y/yoav-artzi/", + "bibtex": "@inproceedings{ji-etal-2022-abstract,\n title = \"Abstract Visual Reasoning with Tangram Shapes\",\n author = \"Ji, Anya and\n Kojima, Noriyuki and\n Rush, Noah and\n Suhr, Alane and\n Vong, Wai Keen and\n Hawkins, Robert and\n Artzi, Yoav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.38/\",\n doi = \"10.18653/v1/2022.emnlp-main.38\",\n pages = \"582--601\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.38.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.38/", + "pdf_size": 5394908, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15835847040211566766&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Cornell University; Cornell University; Cornell University; Cornell University+Allen Institute for AI; New York University; Princeton University; Cornell University", + "aff_domain": "cornell.edu;cornell.edu;gmail.com;cs.cornell.edu;nyu.edu;princeton.edu;cs.cornell.edu", + "email": "cornell.edu;cornell.edu;gmail.com;cs.cornell.edu;nyu.edu;princeton.edu;cs.cornell.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0+1;2;3;0", + "aff_unique_norm": "Cornell University;Allen Institute for AI;New York University;Princeton 
University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cornell.edu;https://allenai.org;https://www.nyu.edu;https://www.princeton.edu", + "aff_unique_abbr": "Cornell;AI2;NYU;Princeton", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.355", + "title": "Abstractive Summarization Guided by Latent Hierarchical Document Structure", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Sequential abstractive neural summarizers often do not use the underlying structure in the input article or dependencies between the input sentences. This structure is essential to integrate and consolidate information from different parts of the text. To address this shortcoming, we propose a hierarchy-aware graph neural network (HierGNN) which captures such dependencies through three main steps: 1) learning a hierarchical document structure through a latent structure tree learned by a sparse matrix-tree computation; 2) propagating sentence information over this structure using a novel message-passing node propagation mechanism to identify salient information; 3) using graph-level attention to concentrate the decoder on salient information. Experiments confirm HierGNN improves strong sequence models such as BART, with a 0.55 and 0.75 margin in average ROUGE-1/2/L for CNN/DM and XSum. Further human evaluation demonstrates that summaries produced by our model are more relevant and less redundant than the baselines, into which HierGNN is incorporated. We also find HierGNN synthesizes summaries by fusing multiple source sentences more, rather than compressing a single source sentence, and that it processes long inputs more effectively.", + "author": "Yifu Qiu; Shay B. 
Cohen", + "authorids": "/y/yifu-qiu/; /s/shay-b-cohen/", + "bibtex": "@inproceedings{qiu-cohen-2022-abstractive,\n title = \"Abstractive Summarization Guided by Latent Hierarchical Document Structure\",\n author = \"Qiu, Yifu and\n Cohen, Shay B.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.355/\",\n doi = \"10.18653/v1/2022.emnlp-main.355\",\n pages = \"5303--5317\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.355.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.355/", + "pdf_size": 680096, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15018651073719193088&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 4, + "aff": "Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh; Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh", + "aff_domain": "sms.ed.ac.uk;inf.ed.ac.uk", + "email": "sms.ed.ac.uk;inf.ed.ac.uk", + "github": "https://github.com/yfqiu-nlp/hiergnn", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Edinburgh", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.ed.ac.uk", + "aff_unique_abbr": "Edinburgh", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Edinburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.205", + "title": "Accelerating Learned Sparse Indexes Via Term Impact Decomposition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Novel inverted index-based learned sparse ranking models 
provide more effective, but less efficient, retrieval performance compared to traditional ranking models like BM25. In this paper, we introduce a technique we call postings clipping to improve the query efficiency of learned representations. Our technique amplifies the benefit of dynamic pruning query processing techniques by accounting for changes in term importance distributions of learned ranking models. The new clipping mechanism accelerates top-k retrieval by up to 9.6X without any loss in effectiveness.", + "author": "Joel Mackenzie; Antonio Mallia; Alistair Moffat; Matthias Petri", + "authorids": "/j/joel-mackenzie/; /a/antonio-mallia/; /a/alistair-moffat/; /m/matthias-petri/", + "bibtex": "@inproceedings{mackenzie-etal-2022-accelerating,\n title = \"Accelerating Learned Sparse Indexes Via Term Impact Decomposition\",\n author = \"Mackenzie, Joel and\n Mallia, Antonio and\n Moffat, Alistair and\n Petri, Matthias\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.205/\",\n doi = \"10.18653/v1/2022.findings-emnlp.205\",\n pages = \"2830--2842\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.205.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.205/", + "pdf_size": 767012, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8044246889991596388&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 11, + "aff": "University of Queensland, Australia; Amazon Alexa, Italy; University of Melbourne, Australia; Amazon Alexa, USA", + "aff_domain": "uq.edu.au;amazon.com;unimelb.edu.au;amazon.com", + "email": "uq.edu.au;amazon.com;unimelb.edu.au;amazon.com", + "github": "", + "project": "", + "author_num": 4, 
+ "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "University of Queensland;Amazon Alexa;University of Melbourne", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uq.edu.au;https://www.amazon.com/alexa;https://www.unimelb.edu.au", + "aff_unique_abbr": "UQ;Amazon Alexa;UniMelb", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2", + "aff_country_unique": "Australia;Italy;United States" + }, + { + "id": "2022.emnlp-industry.6", + "title": "Accelerating the Discovery of Semantic Associations from Medical Literature: Mining Relations Between Diseases and Symptoms", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Medical literature is a vast and constantly expanding source of information about diseases, their diagnoses and treatments. One of the ways to extract insights from this type of data is through mining association rules between such entities. However, existing solutions do not take into account the semantics of sentences from which entity co-occurrences are extracted. We propose a scalable solution for the automated discovery of semantic associations between different entities such as diseases and their symptoms. Our approach employs the UMLS semantic network and a binary relation classification model trained with distant supervision to validate and help ranking the most likely entity associations pairs extracted with frequency-based association rule mining algorithms. 
We evaluate the proposed system on the task of extracting disease-symptom associations from a collection of over 14M PubMed abstracts and validate our results against a publicly available known list of disease-symptom pairs.", + "author": "Alberto Purpura; Francesca Bonin; Joao Bettencourt-silva", + "authorids": "/a/alberto-purpura/; /f/francesca-bonin/; /j/joao-bettencourt-silva/", + "bibtex": "@inproceedings{purpura-etal-2022-accelerating,\n title = \"Accelerating the Discovery of Semantic Associations from Medical Literature: Mining Relations Between Diseases and Symptoms\",\n author = \"Purpura, Alberto and\n Bonin, Francesca and\n Bettencourt-silva, Joao\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.6/\",\n doi = \"10.18653/v1/2022.emnlp-industry.6\",\n pages = \"77--89\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.6.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.6/", + "pdf_size": 323733, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9951753140767983820&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "IBM Research Europe; IBM Research Europe; IBM Research Europe", + "aff_domain": "ibm.com;ie.ibm.com;ie.ibm.com", + "email": "ibm.com;ie.ibm.com;ie.ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "Research", + "aff_unique_url": "https://www.ibm.com/research/europe", + "aff_unique_abbr": "IBM Research", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Europe" + }, + { + "id": 
"2022.findings-emnlp.7", + "title": "Acceptability Judgements via Examining the Topology of Attention Maps", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The role of the attention mechanism in encoding linguistic knowledge has received special interest in NLP. However, the ability of the attention heads to judge the grammatical acceptability of a sentence has been underexplored. This paper approaches the paradigm of acceptability judgments with topological data analysis (TDA), showing that the geometric properties of the attention graph can be efficiently exploited for two standard practices in linguistics: binary judgments and linguistic minimal pairs. Topological features enhance the BERT-based acceptability classifier scores by 8%-24% on CoLA in three languages (English, Italian, and Swedish). By revealing the topological discrepancy between attention maps of minimal pairs, we achieve the human-level performance on the BLiMP benchmark, outperforming nine statistical and Transformer LM baselines. At the same time, TDA provides the foundation for analyzing the linguistic functions of attention heads and interpreting the correspondence between the graph features and grammatical phenomena. 
We publicly release the code and other materials used in the experiments.", + "author": "Daniil Cherniavskii; Eduard Tulchinskii; Vladislav Mikhailov; Irina Proskurina; Laida Kushnareva; Ekaterina Artemova; Serguei Barannikov; Irina Piontkovskaya; Dmitri Piontkovski; Evgeny Burnaev", + "authorids": "/d/daniil-cherniavskii/; /e/eduard-tulchinskii/; /v/vladislav-mikhailov/; /i/irina-proskurina/; /l/laida-kushnareva/; /e/ekaterina-artemova/; /s/serguei-barannikov/; /i/irina-piontkovskaya/; /d/dmitri-piontkovski/; /e/evgeny-burnaev/", + "bibtex": "@inproceedings{cherniavskii-etal-2022-acceptability,\n title = \"Acceptability Judgements via Examining the Topology of Attention Maps\",\n author = \"Cherniavskii, Daniil and\n Tulchinskii, Eduard and\n Mikhailov, Vladislav and\n Proskurina, Irina and\n Kushnareva, Laida and\n Artemova, Ekaterina and\n Barannikov, Serguei and\n Piontkovskaya, Irina and\n Piontkovski, Dmitri and\n Burnaev, Evgeny\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.7/\",\n doi = \"10.18653/v1/2022.findings-emnlp.7\",\n pages = \"88--107\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.7.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.7/", + "pdf_size": 955207, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12917296352144513608&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 8, + "aff": "Skolkovo Institute of Science and Technology+AIRI; Skolkovo Institute of Science and Technology+AIRI; SberDevices; HSE University; Huawei Noah\u2019s Ark lab; Huawei Noah\u2019s Ark lab+Center for Information and Language Processing (CIS), LMU Munich, Germany; Skolkovo Institute of 
Science and Technology+CNRS, IMJ; Huawei Noah\u2019s Ark lab; HSE University; Skolkovo Institute of Science and Technology+AIRI", + "aff_domain": "skoltech.ru; ; ; ; ; ; ; ; ; ", + "email": "skoltech.ru; ; ; ; ; ; ; ; ; ", + "github": "github.com/danchern97/tda4la", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1;0+1;2;3;4;4+5;0+6;4;3;0+1", + "aff_unique_norm": "Skolkovo Institute of Science and Technology;Artificial Intelligence Research Institute;SberDevices;Higher School of Economics;Huawei;LMU Munich;CNRS", + "aff_unique_dep": ";;;;Noah\u2019s Ark lab;Center for Information and Language Processing (CIS);Institut de Math\u00e9matiques de Jussieu", + "aff_unique_url": "https://www.skoltech.ru;https://www.airi.jp;https://sberdevices.ru;https://hse.ru;https://www.huawei.com;https://www.lmu.de;https://www.cnrs.fr", + "aff_unique_abbr": "Skoltech;AIRI;SberDevices;HSE;Huawei;LMU;CNRS", + "aff_campus_unique_index": ";;1;;", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0+1;0+1;0;0;2;2+3;0+4;2;0;0+1", + "aff_country_unique": "Russia;Japan;China;Germany;France" + }, + { + "id": "2022.emnlp-main.622", + "title": "Active Example Selection for In-Context Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With a handful of demonstration examples, large-scale language models demonstrate strong capability to perform various tasks by in-context learning from these examples, without any fine-tuning. We demonstrate that in-context learning performance can be highly unstable across samples of examples, indicating the idiosyncrasies of how language models acquire information. We formulate example selection for in-context learning as a sequential decision problem, and propose a reinforcement learning algorithm for identifying generalizable policies to select demonstration examples. 
For GPT-2, our learned policies demonstrate strong abilities of generalizing to unseen tasks in training, with a 5.8% improvement on average. Examples selected from our learned policies can even achieve a small improvement on GPT-3 Ada. However, the improvement diminishes on larger GPT-3 models, suggesting emerging capabilities of large language models.", + "author": "Yiming Zhang; Shi Feng; Chenhao Tan", + "authorids": "/y/yiming-zhang/; /s/shi-feng/; /c/chenhao-tan/", + "bibtex": "@inproceedings{zhang-etal-2022-active,\n title = \"Active Example Selection for In-Context Learning\",\n author = \"Zhang, Yiming and\n Feng, Shi and\n Tan, Chenhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.622/\",\n doi = \"10.18653/v1/2022.emnlp-main.622\",\n pages = \"9134--9148\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.622.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.622/", + "pdf_size": 455240, + "gs_citation": 186, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3744597137114959810&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "University of Chicago; University of Chicago; University of Chicago", + "aff_domain": "uchicago.edu;uchicago.edu;uchicago.edu", + "email": "uchicago.edu;uchicago.edu;uchicago.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Chicago", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uchicago.edu", + "aff_unique_abbr": "UChicago", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"2022.findings-emnlp.377", + "title": "Active Learning for Abstractive Text Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Construction of human-curated annotated datasets for abstractive text summarization (ATS) is very time-consuming and expensive because creating each instance requires a human annotator to read a long document and compose a shorter summary that would preserve the key information relayed by the original document. Active Learning (AL) is a technique developed to reduce the amount of annotation required to achieve a certain level of machine learning model performance. In information extraction and text classification, AL can reduce the amount of labor up to multiple times. Despite its potential for aiding expensive annotation, as far as we know, there were no effective AL query strategies for ATS. This stems from the fact that many AL strategies rely on uncertainty estimation, while as we show in our work, uncertain instances are usually noisy, and selecting them can degrade the model performance compared to passive annotation. We address this problem by proposing the first effective query strategy for AL in ATS based on diversity principles. We show that given a certain annotation budget, using our strategy in AL annotation helps to improve the model performance in terms of ROUGE and consistency scores. 
Additionally, we analyze the effect of self-learning and show that it can additionally increase the performance of the model.", + "author": "Akim Tsvigun; Ivan Lysenko; Danila Sedashov; Ivan Lazichny; Eldar Damirov; Vladimir Karlov; Artemy Belousov; Leonid Sanochkin; Maxim Panov; Alexander Panchenko; Mikhail Burtsev; Artem Shelmanov", + "authorids": "/a/akim-tsvigun/; /i/ivan-lysenko/; /d/danila-sedashov/; /i/ivan-lazichny/; /e/eldar-damirov/; /v/vladimir-karlov/; /a/artemy-belousov/; /l/leonid-sanochkin/; /m/maxim-panov/; /a/alexander-panchenko/; /m/mikhail-burtsev/; /a/artem-shelmanov/", + "bibtex": "@inproceedings{tsvigun-etal-2022-active,\n title = \"Active Learning for Abstractive Text Summarization\",\n author = \"Tsvigun, Akim and\n Lysenko, Ivan and\n Sedashov, Danila and\n Lazichny, Ivan and\n Damirov, Eldar and\n Karlov, Vladimir and\n Belousov, Artemy and\n Sanochkin, Leonid and\n Panov, Maxim and\n Panchenko, Alexander and\n Burtsev, Mikhail and\n Shelmanov, Artem\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.377/\",\n doi = \"10.18653/v1/2022.findings-emnlp.377\",\n pages = \"5128--5152\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.377.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.377/", + "pdf_size": 1814389, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18245599794276241032&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;", + "email": ";;;;;;;;;;;", + "github": "", + "project": "", + "author_num": 12 + }, + { + "id": "2022.emnlp-main.388", + "title": "AdaMix: Mixture-of-Adaptations for Parameter-efficient 
Model Tuning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Standard fine-tuning of large pre-trained language models (PLMs) for downstream tasks requires updating hundreds of millions to billions of parameters, and storing a large copy of the PLM weights for every task resulting in increased cost for storing, sharing and serving the models. To address this, parameter-efficient fine-tuning (PEFT) techniques were introduced where small trainable components are injected in the PLM and updated during fine-tuning. We propose AdaMix as a general PEFT method that tunes a mixture of adaptation modules \u2013 given the underlying PEFT method of choice \u2013 introduced in each Transformer layer while keeping most of the PLM weights frozen. For instance, AdaMix can leverage a mixture of adapters like Houlsby or a mixture of low rank decomposition matrices like LoRA to improve downstream task performance over the corresponding PEFT methods for fully supervised and few-shot NLU and NLG tasks. Further, we design AdaMix such that it matches the same computational cost and the number of tunable parameters as the underlying PEFT method. 
By only tuning 0.1-0.2% of PLM parameters, we show that AdaMix outperforms SOTA parameter-efficient fine-tuning and full model fine-tuning for both NLU and NLG tasks.", + "author": "Yaqing Wang; Sahaj Agarwal; Subhabrata Mukherjee; Xiaodong Liu; Jing Gao; Ahmed Hassan Awadallah; Jianfeng Gao", + "authorids": "/y/yaqing-wang/; /s/sahaj-agarwal/; /s/subhabrata-mukherjee/; /x/xiaodong-liu/; /j/jing-gao/; /a/ahmed-hassan/; /j/jianfeng-gao/", + "bibtex": "@inproceedings{wang-etal-2022-adamix,\n title = \"{A}da{M}ix: Mixture-of-Adaptations for Parameter-efficient Model Tuning\",\n author = \"Wang, Yaqing and\n Agarwal, Sahaj and\n Mukherjee, Subhabrata and\n Liu, Xiaodong and\n Gao, Jing and\n Awadallah, Ahmed Hassan and\n Gao, Jianfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.388/\",\n doi = \"10.18653/v1/2022.emnlp-main.388\",\n pages = \"5744--5760\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.388.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.388/", + "pdf_size": 830351, + "gs_citation": 145, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9411931278696214240&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "Purdue University; Microsoft; Microsoft Research; Microsoft Research; Purdue University; Microsoft Research; Microsoft Research", + "aff_domain": "purdue.edu;microsoft.com;microsoft.com; ; ; ; ", + "email": "purdue.edu;microsoft.com;microsoft.com; ; ; ; ", + "github": "", + "project": "https://aka.ms/AdaMix", + "author_num": 7, + "aff_unique_index": "0;1;1;1;0;1;1", + "aff_unique_norm": "Purdue University;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.purdue.edu;https://www.microsoft.com", + "aff_unique_abbr": "Purdue;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.448", + "title": "AdaPrompt: Adaptive Model Training for Prompt-based NLP", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompt-based learning, with its capability to tackle zero-shot and few-shot NLP tasks, has gained much attention in the community.The main idea is to bridge the gap between NLP downstream tasks and language modeling (LM), by mapping these tasks into natural language prompts, which are then filled by pre-trained language models (PLMs).However, for prompt learning, there are still two salient gaps between NLP tasks and pretraining.First, prompt information is not necessarily sufficiently present during LM pre-training. Second, task-specific data are not necessarily well represented during pre-training. We address these two issues by proposing AdaPrompt, adaptively retrieving external data for continual pretraining of PLMs by making use of both task and prompt characteristics. In addition, we make use of knowledge in Natural Language Inference models for deriving adaptive verbalizers.Experimental results on five NLP benchmarks show that AdaPrompt can improve over standard PLMs in few-shot settings. 
In addition, in zero-shot settings, our method outperforms standard prompt-based methods by up to 26.35% relative error reduction.", + "author": "Yulong Chen; Yang Liu; Li Dong; Shuohang Wang; Chenguang Zhu; Michael Zeng; Yue Zhang", + "authorids": "/y/yulong-chen/; /y/yang-liu/; /l/li-dong/; /s/shuohang-wang/; /c/chenguang-zhu/; /m/michael-zeng/; /y/yue-zhang/", + "bibtex": "@inproceedings{chen-etal-2022-adaprompt,\n title = \"{A}da{P}rompt: Adaptive Model Training for Prompt-based {NLP}\",\n author = \"Chen, Yulong and\n Liu, Yang and\n Dong, Li and\n Wang, Shuohang and\n Zhu, Chenguang and\n Zeng, Michael and\n Zhang, Yue\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.448/\",\n doi = \"10.18653/v1/2022.findings-emnlp.448\",\n pages = \"6057--6068\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.448.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.448/", + "pdf_size": 2796723, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3725115380459501193&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Zhejiang University\u2661Westlake University; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; Westlake University\u2662Westlake Institute for Advanced Study", + "aff_domain": "gmail.com;microsoft.com; ; ; ; ;wias.org.cn", + "email": "gmail.com;microsoft.com; ; ; ; ;wias.org.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;1;1;2", + "aff_unique_norm": "Zhejiang University;Microsoft Corporation;Westlake University", + "aff_unique_dep": ";Microsoft Research;Westlake Institute for Advanced Study", + 
"aff_unique_url": "http://www.zju.edu.cn;https://www.microsoft.com/en-us/research;https://www.westlake.edu.cn/", + "aff_unique_abbr": "ZJU;MSR;WU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.728", + "title": "AdapterShare: Task Correlation Modeling with Adapter Differentiation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Thanks to the development of pre-trained language models, multitask learning (MTL) methods achieve a great success in natural language understanding area.However, current MTL methods pay more attention to task selection or model design to fuse as much knowledge as possible, while intrinsic task correlation is often neglected. It is important to learn sharing strategy among multiple tasks rather than sharing everything.%The MTL model is directly shared among all the tasks. %For example, in traditional MTL methods, the last classification layers or the decoder layers are manually separated. More deeply, In this paper, we propose AdapterShare, an adapter differentiation method to explicitly model the task correlation among multiple tasks. AdapterShare is automatically learned based on the gradients on tiny held-out validation data. Compared to single-task learning and fully shared MTL methods, our proposed method obtains obvious performance improvement. 
Compared to the existing MTL method AdapterFusion, AdapterShare achieves absolute 1.90 average points improvement on five dialogue understanding tasks and 2.33 points gain on NLU tasks.", + "author": "Zhi Chen; Bei Chen; Lu Chen; Kai Yu; Jian-Guang Lou", + "authorids": "/z/zhi-chen/; /b/bei-chen/; /l/lu-chen/; /k/kai-yu/; /j/jian-guang-lou/", + "bibtex": "@inproceedings{chen-etal-2022-adaptershare,\n title = \"{A}dapter{S}hare: Task Correlation Modeling with Adapter Differentiation\",\n author = \"Chen, Zhi and\n Chen, Bei and\n Chen, Lu and\n Yu, Kai and\n Lou, Jian-Guang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.728/\",\n doi = \"10.18653/v1/2022.emnlp-main.728\",\n pages = \"10645--10651\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.728.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.728/", + "pdf_size": 729815, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4738672840020733701&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 2, + "aff": "X-LANCE Lab, Department of Computer Science and Engineering, MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Microsoft Research Asia; X-LANCE Lab, Department of Computer Science and Engineering, MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; X-LANCE Lab, Department of Computer Science and Engineering, MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; Microsoft Research Asia", + "aff_domain": "sjtu.edu.cn;microsoft.com;sjtu.edu.cn;sjtu.edu.cn;microsoft.com", + "email": 
"sjtu.edu.cn;microsoft.com;sjtu.edu.cn;sjtu.edu.cn;microsoft.com", + "github": "https://github.com/microsoft/ContextualSP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Microsoft Research", + "aff_unique_dep": "Department of Computer Science and Engineering;Research", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "SJTU;MSR Asia", + "aff_campus_unique_index": "0;1;0;0;1", + "aff_campus_unique": "Shanghai;Asia", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.287", + "title": "Adapters for Enhanced Modeling of Multilingual Knowledge and Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large language models appear to learn facts from the large text corpora they are trained on. Such facts are encoded implicitly within their many parameters, making it difficult to verify or manipulate what knowledge has been learned. Language models have recently been extended to multilingual language models (MLLMs), enabling knowledge to be learned across hundreds of languages. Meanwhile, knowledge graphs contain facts in an explicit triple format, which require careful and costly curation and are only available in a few high-resource languages, restricting their research and application. To address these issues, we propose to enhance MLLMs with knowledge from multilingual knowledge graphs (MLKGs) so as to tackle language and knowledge graph tasks across many languages, including low-resource ones. Specifically, we introducea lightweight adapter set to enhance MLLMs with cross-lingual entity alignment and facts from MLKGs for many languages. 
Experiments on common benchmarks show that such enhancement benefits both MLLMs and MLKGs, achieving: (1) comparable or improved performance for knowledge graph completion and entity alignment relative to baselines, especially for low-resource languages (for which knowledge graphs are unavailable); and (2) improved MLLM performance on language understanding tasks that require multilingual factual knowledge; all while maintaining performance on other general language tasks.", + "author": "Yifan Hou; Wenxiang Jiao; Meizhen Liu; Carl Allen; Zhaopeng Tu; Mrinmaya Sachan", + "authorids": "/y/yifan-hou/; /w/wenxiang-jiao/; /m/meizhen-liu/; /c/carl-allen/; /z/zhaopeng-tu/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{hou-etal-2022-adapters,\n title = \"Adapters for Enhanced Modeling of Multilingual Knowledge and Text\",\n author = \"Hou, Yifan and\n Jiao, Wenxiang and\n Liu, Meizhen and\n Allen, Carl and\n Tu, Zhaopeng and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.287/\",\n doi = \"10.18653/v1/2022.findings-emnlp.287\",\n pages = \"3902--3917\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.287.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.287/", + "pdf_size": 737220, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14780012222037574819&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "ETH Z\u00fcrich; Tencent AI Lab; Shandong University; ETH Z\u00fcrich; Tencent AI Lab; ETH Z\u00fcrich", + "aff_domain": "inf.ethz.ch;inf.ethz.ch;inf.ethz.ch;tencent.com;tencent.com;mail.sdu.edu.cn", + "email": 
"inf.ethz.ch;inf.ethz.ch;inf.ethz.ch;tencent.com;tencent.com;mail.sdu.edu.cn", + "github": "https://github.com/yifan-h/Multilingual_Space", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;1;0", + "aff_unique_norm": "ETH Z\u00fcrich;Tencent;Shandong University", + "aff_unique_dep": ";Tencent AI Lab;", + "aff_unique_url": "https://www.ethz.ch;https://ai.tencent.com;http://www.sdu.edu.cn", + "aff_unique_abbr": "ETHZ;Tencent AI Lab;SDU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;1;0", + "aff_country_unique": "Switzerland;China" + }, + { + "id": "2022.findings-emnlp.528", + "title": "Adapting Multilingual Models for Code-Mixed Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The scarcity of gold standard code-mixed to pure language parallel data makes it difficult to train translation models reliably. Prior work has addressed the paucity of parallel data with data augmentation techniques. Such methods rely heavily on external resources making systems difficult to train and scale effectively for multiple languages. We present a simple yet highly effective two-stage back-translation based training scheme for adapting multilingual models to the task of code-mixed translation which eliminates dependence on external resources. We show a substantial improvement in translation quality (measured through BLEU), beating existing prior work by up to +3.8 BLEU on code-mixed Hi\u2192En, Mr\u2192En, and Bn\u2192En tasks. 
On the LinCE Machine Translation leader board, we achieve the highest score for code-mixed Es\u2192En, beating existing best baseline by +6.5 BLEU, and our own stronger baseline by +1.1 BLEU.", + "author": "Aditya Vavre; Abhirut Gupta; Sunita Sarawagi", + "authorids": "/a/aditya-vavre/; /a/abhirut-gupta/; /s/sunita-sarawagi/", + "bibtex": "@inproceedings{vavre-etal-2022-adapting,\n title = \"Adapting Multilingual Models for Code-Mixed Translation\",\n author = \"Vavre, Aditya and\n Gupta, Abhirut and\n Sarawagi, Sunita\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.528/\",\n doi = \"10.18653/v1/2022.findings-emnlp.528\",\n pages = \"7133--7141\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.528.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.528/", + "pdf_size": 472357, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16247252855728186353&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "IIT Bombay; Google Research; IIT Bombay", + "aff_domain": "cse.iitb.ac.in;google.com;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;google.com;cse.iitb.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Indian Institute of Technology Bombay;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.iitb.ac.in;https://research.google", + "aff_unique_abbr": "IITB;Google Research", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Mumbai;Mountain View", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.693", + "title": "Adapting a 
Language Model While Preserving its General Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Domain-adaptive pre-training (or DA-training for short), also known as post-training, aims to train a pre-trained general-purpose language model (LM) using an unlabeled corpus of a particular domain to adapt the LM so that end-tasks in the domain can give improved performances. However, existing DA-training methods are in some sense blind as they do not explicitly identify what knowledge in the LM should be preserved and what should be changed by the domain corpus. This paper shows that the existing methods are suboptimal and proposes a novel method to perform a more informed adaptation of the knowledge in the LM by (1) soft-masking the attention heads based on their importance to best preserve the general knowledge in the LM and (2) contrasting the representations of the general and the full (both general and domain knowledge) to learn an integrated representation with both general and domain-specific knowledge. 
Experimental results will demonstrate the effectiveness of the proposed approach.", + "author": "Zixuan Ke; Yijia Shao; Haowei Lin; Hu Xu; Lei Shu; Bing Liu", + "authorids": "/z/zixuan-ke/; /y/yijia-shao/; /h/haowei-lin/; /h/hu-xu/; /l/lei-shu/; /b/bing-liu/", + "bibtex": "@inproceedings{ke-etal-2022-adapting,\n title = \"Adapting a Language Model While Preserving its General Knowledge\",\n author = \"Ke, Zixuan and\n Shao, Yijia and\n Lin, Haowei and\n Xu, Hu and\n Shu, Lei and\n Liu, Bing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.693/\",\n doi = \"10.18653/v1/2022.emnlp-main.693\",\n pages = \"10177--10188\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.693.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.693/", + "pdf_size": 452442, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12818258189154933391&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, University of Illinois at Chicago; Wangxuan Institute of Computer Technology, Peking University; Meta AI; Department of Computer Science, University of Illinois at Chicago; Department of Computer Science, University of Illinois at Chicago; Department of Computer Science, University of Illinois at Chicago", + "aff_domain": "uic.edu;pku.edu.cn;pku.edu.cn;fb.com;google.com;uic.edu", + "email": "uic.edu;pku.edu.cn;pku.edu.cn;fb.com;google.com;uic.edu", + "github": "https://github.com/UIC-Liu-Lab/DGAmodel", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;0", + "aff_unique_norm": "University of Illinois at Chicago;Peking University;Meta Platforms, Inc.", + 
"aff_unique_dep": "Department of Computer Science;Wangxuan Institute of Computer Technology;Meta AI", + "aff_unique_url": "https://www.uic.edu;http://www.pku.edu.cn;https://meta.com", + "aff_unique_abbr": "UIC;PKU;Meta", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;1;0;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.686", + "title": "Adaptive Contrastive Learning on Multimodal Transformer for Review Helpfulness Prediction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Modern Review Helpfulness Prediction systems are dependent upon multiple modalities, typically texts and images. Unfortunately, those contemporary approaches pay scarce attention to polish representations of cross-modal relations and tend to suffer from inferior optimization. This might cause harm to model\u2019s predictions in numerous cases. To overcome the aforementioned issues, we propose Multi-modal Contrastive Learning for Multimodal Review Helpfulness Prediction (MRHP) problem, concentrating on mutual information between input modalities to explicitly elaborate cross-modal relations. In addition, we introduce Adaptive Weighting scheme for our contrastive learning approach in order to increase flexibility in optimization. Lastly, we propose Multimodal Interaction module to address the unalignment nature of multimodal data, thereby assisting the model in producing more reasonable multimodal representations. 
Experimental results show that our method outperforms prior baselines and achieves state-of-the-art results on two publicly available benchmark datasets for MRHP problem.", + "author": "Thong Nguyen; Xiaobao Wu; Anh Tuan Luu; Zhen Hai; Lidong Bing", + "authorids": "/t/thong-nguyen/; /x/xiaobao-wu/; /l/luu-anh-tuan/; /z/zhen-hai/; /l/lidong-bing/", + "bibtex": "@inproceedings{nguyen-etal-2022-adaptive,\n title = \"Adaptive Contrastive Learning on Multimodal Transformer for Review Helpfulness Prediction\",\n author = \"Nguyen, Thong and\n Wu, Xiaobao and\n Luu, Anh Tuan and\n Hai, Zhen and\n Bing, Lidong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.686/\",\n doi = \"10.18653/v1/2022.emnlp-main.686\",\n pages = \"10085--10096\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.686.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.686/", + "pdf_size": 1262981, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16708717653135220163&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.444", + "title": "Adaptive Graph Convolutional Network for Knowledge Graph Entity Alignment", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Entity alignment (EA) aims to identify equivalent entities from different Knowledge Graphs (KGs), which is a fundamental task for integrating KGs. Throughout its development, Graph Convolutional Network (GCN) has become one of the mainstream methods for EA. 
These GCN-based methods learn the representations of entities from two KGs by message passing mechanism and then make alignments via measuring the similarity between entity embeddings. The key idea that GCN works in EA is that entities with similar neighbor structures are highly likely to be aligned. However, the noisy neighbors of entities transfer invalid information, drown out equivalent information, lead to inaccurate entity embeddings, and finally reduce the performance of EA. Based on the Sinkhorn algorithm, we design a reliability measure for potential equivalent entities and propose Adaptive Graph Convolutional Network to deal with neighbor noises in GCN. During the training, the network dynamically updates the adaptive weights of relation triples to weaken the propagation of noises. While calculating entity similarity, it comprehensively considers the self-similarity and neighborhood similarity of the entity pair to alleviate the influence of noises. Furthermore, we design a straightforward but efficient strategy to construct pseudo alignments for unsupervised EA. 
Extensive experiments on benchmark datasets demonstrate that our framework outperforms the state-of-the-art methods in both supervised and unsupervised settings.", + "author": "Renbo Zhu; Xukun Luo; Meng Ma; Ping Wang", + "authorids": "/r/renbo-zhu/; /x/xukun-luo/; /m/meng-ma/; /p/ping-wang/", + "bibtex": "@inproceedings{zhu-etal-2022-adaptive,\n title = \"Adaptive Graph Convolutional Network for Knowledge Graph Entity Alignment\",\n author = \"Zhu, Renbo and\n Luo, Xukun and\n Ma, Meng and\n Wang, Ping\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.444/\",\n doi = \"10.18653/v1/2022.findings-emnlp.444\",\n pages = \"6011--6021\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.444.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.444/", + "pdf_size": 2127240, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17377476619959464392&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "School of Software and Microelectronics, Peking University; School of Software and Microelectronics, Peking University; School of Software and Microelectronics, Peking University + National Engineering Research Center for Software Engineering, Peking University + Key Laboratory of High Confidence Software Technologies (PKU), Ministry of Education; School of Software and Microelectronics, Peking University + National Engineering Research Center for Software Engineering, Peking University + Key Laboratory of High Confidence Software Technologies (PKU), Ministry of Education", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": 
"", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+0+0;0+0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Software and Microelectronics", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.664", + "title": "Adaptive Label Smoothing with Self-Knowledge in Natural Language Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Overconfidence has been shown to impair generalization and calibration of a neural network. Previous studies remedy this issue by adding a regularization term to a loss function, preventing a model from making a peaked distribution. Label smoothing smoothes target labels with a pre-defined prior label distribution; as a result, a model is learned to maximize the likelihood of predicting the soft label. Nonetheless, the amount of smoothing is the same in all samples and remains fixed in training. In other words, label smoothing does not reflect the change in probability distribution mapped by a model over the course of training. To address this issue, we propose a regularization scheme that brings dynamic nature into the smoothing parameter by taking model probability distribution into account, thereby varying the parameter per instance. A model in training self-regulates the extent of smoothing on the fly during forward propagation. Furthermore, inspired by recent work in bridging label smoothing and knowledge distillation, our work utilizes self-knowledge as a prior label distribution in softening target labels, and presents theoretical support for the regularization effect by knowledge distillation and the dynamic smoothing parameter. 
Our regularizer is validated comprehensively, and the result illustrates marked improvements in model generalization and calibration, enhancing robustness and trustworthiness of a model.", + "author": "Dongkyu Lee; Ka Chun Cheung; Nevin Zhang", + "authorids": "/d/dongkyu-lee/; /k/ka-chun-cheung/; /n/nevin-zhang/", + "bibtex": "@inproceedings{lee-etal-2022-adaptive,\n title = \"Adaptive Label Smoothing with Self-Knowledge in Natural Language Generation\",\n author = \"Lee, Dongkyu and\n Cheung, Ka Chun and\n Zhang, Nevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.664/\",\n doi = \"10.18653/v1/2022.emnlp-main.664\",\n pages = \"9781--9792\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.664.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.664/", + "pdf_size": 1724658, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1512772715629252104&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, HKUST + NVIDIA AI Technology Center, NVIDIA; NVIDIA AI Technology Center, NVIDIA; Department of Computer Science and Engineering, HKUST", + "aff_domain": "cse.ust.hk;nvidia.com;cse.ust.hk", + "email": "cse.ust.hk;nvidia.com;cse.ust.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;NVIDIA", + "aff_unique_dep": "Department of Computer Science and Engineering;NVIDIA AI Technology Center", + "aff_unique_url": "https://www.hkust.edu.hk;https://www.nvidia.com", + "aff_unique_abbr": "HKUST;NVIDIA", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.119", + "title": "Adaptive Ranking-based Sample Selection for Weakly Supervised Class-imbalanced Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To obtain a large amount of training labels inexpensively, researchers have recently adopted the weak supervision (WS) paradigm, which leverages labeling rules to synthesize training labels rather than using individual annotations to achieve competitive results for natural language processing (NLP) tasks. However, data imbalance is often overlooked in applying the WS paradigm, despite being a common issue in a variety of NLP tasks. To address this challenge, we propose Adaptive Ranking-based Sample Selection (ARS2), a model-agnostic framework to alleviate the data imbalance issue in the WS paradigm. Specifically, it calculates a probabilistic margin score based on the output of the current model to measure and rank the cleanliness of each data point. Then, the ranked data are sampled based on both class-wise and rule-aware ranking. In particular, the two sample strategies corresponds to our motivations: (1) to train the model with balanced data batches to reduce the data imbalance issue and (2) to exploit the expertise of each labeling rule for collecting clean samples. 
Experiments on four text classification datasets with four different imbalance ratios show that ARS2 outperformed the state-of-the-art imbalanced learning and WS methods, leading to a 2%-57.8% improvement on their F1-score.", + "author": "Linxin Song; Jieyu Zhang; Tianxiang Yang; Masayuki Goto", + "authorids": "/l/linxin-song/; /j/jieyu-zhang/; /t/tianxiang-yang/; /m/masayuki-goto/", + "bibtex": "@inproceedings{song-etal-2022-adaptive,\n title = \"Adaptive Ranking-based Sample Selection for Weakly Supervised Class-imbalanced Text Classification\",\n author = \"Song, Linxin and\n Zhang, Jieyu and\n Yang, Tianxiang and\n Goto, Masayuki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.119/\",\n doi = \"10.18653/v1/2022.findings-emnlp.119\",\n pages = \"1641--1655\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.119.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.119/", + "pdf_size": 1267588, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13888965302124866417&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff": "Waseda University; University of Washington; Waseda University; Waseda University", + "aff_domain": "ruri.waseda.jp;cs.washington.edu;akane.waseda.jp;waseda.jp", + "email": "ruri.waseda.jp;cs.washington.edu;akane.waseda.jp;waseda.jp", + "github": "https://github.com/JieyuZ2/wrench/blob/main/wrench/endmodel/ars2.py", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Waseda University;University of Washington", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.waseda.jp/top;https://www.washington.edu", + "aff_unique_abbr": 
"Waseda;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-main.687", + "title": "Adaptive Token-level Cross-lingual Feature Mixing for Multilingual Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multilingual neural machine translation aims to translate multiple language pairs in a single model and has shown great success thanks to the knowledge transfer across languages with the shared parameters. Despite promising, this share-all paradigm suffers from insufficient ability to capture language-specific features. Currently, the common practice is to insert or search language-specific networks to balance the shared and specific features. However, those two types of features are not sufficient enough to model the complex commonality and divergence across languages, such as the locally shared features among similar languages, which leads to sub-optimal transfer, especially in massively multilingual translation. In this paper, we propose a novel token-level feature mixing method that enables the model to capture different features and dynamically determine the feature sharing across languages. Based on the observation that the tokens in the multilingual model are usually shared by different languages, we we insert a feature mixing layer into each Transformer sublayer and model each token representation as a mix of different features, with a proportion indicating its feature preference. In this way, we can perform fine-grained feature sharing and achieve better multilingual transfer. Experimental results on multilingual datasets show that our method outperforms various strong baselines and can be extended to zero-shot translation. 
Further analyses reveal that our method can capture different linguistic features and bridge the representation gap across languages.", + "author": "Junpeng Liu; Kaiyu Huang; Jiuyi Li; Huan Liu; Jinsong Su; Degen Huang", + "authorids": "/j/junpeng-liu/; /k/kaiyu-huang/; /j/jiuyi-li/; /h/huan-liu/; /j/jinsong-su/; /d/degen-huang/", + "bibtex": "@inproceedings{liu-etal-2022-adaptive,\n title = \"Adaptive Token-level Cross-lingual Feature Mixing for Multilingual Neural Machine Translation\",\n author = \"Liu, Junpeng and\n Huang, Kaiyu and\n Li, Jiuyi and\n Liu, Huan and\n Su, Jinsong and\n Huang, Degen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.687/\",\n doi = \"10.18653/v1/2022.emnlp-main.687\",\n pages = \"10097--10113\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.687.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.687/", + "pdf_size": 2201793, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9558388006103528849&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 0, + "aff": "Dalian University of Technology; Institute for AI Industry Research, Tsinghua University; Dalian University of Technology; Dalian University of Technology; Xiamen University; Dalian University of Technology", + "aff_domain": "mail.dlut.edu.cn;air.tsinghua.edu.cn;mail.dlut.edu.cn;mail.dlut.edu.cn;xmu.edu.cn;dlut.edu.cn", + "email": "mail.dlut.edu.cn;air.tsinghua.edu.cn;mail.dlut.edu.cn;mail.dlut.edu.cn;xmu.edu.cn;dlut.edu.cn", + "github": "https://github.com/raburabu91/HiTrans", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;2;0", + "aff_unique_norm": "Dalian University of 
Technology;Tsinghua University;Xiamen University", + "aff_unique_dep": ";Institute for AI Industry Research;", + "aff_unique_url": "http://www.dlut.edu.cn/;https://www.tsinghua.edu.cn;https://www.xmu.edu.cn", + "aff_unique_abbr": "DUT;Tsinghua;XMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.405", + "title": "Adversarial Concept Erasure in Kernel Space", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The representation space of neural models for textual data emerges in an unsupervised manner during training. Understanding how human-interpretable concepts, such as gender, are encoded in these representations would improve the ability of users to control the content of these representations and analyze the working of the models that rely on them. One prominent approach to the control problem is the identification and removal of linear concept subspaces \u2013 subspaces in the representation space that correspond to a given concept. While those are tractable and interpretable, neural network do not necessarily represent concepts in linear subspaces. We propose a kernelization of the recently-proposed linear concept-removal objective, and show that it is effective in guarding against the ability of certain nonlinear adversaries to recover the concept. Interestingly, our findings suggest that the division between linear and nonlinear models is overly simplistic: when considering the concept of binary gender and its neutralization, we do not find a single kernel space that exclusively contains all the concept-related information. 
It is therefore challenging to protect against all nonlinear adversaries at once.", + "author": "Shauli Ravfogel; Francisco Vargas; Yoav Goldberg; Ryan Cotterell", + "authorids": "/s/shauli-ravfogel/; /f/francisco-vargas/; /y/yoav-goldberg/; /r/ryan-cotterell/", + "bibtex": "@inproceedings{ravfogel-etal-2022-adversarial,\n title = \"Adversarial Concept Erasure in Kernel Space\",\n author = \"Ravfogel, Shauli and\n Vargas, Francisco and\n Goldberg, Yoav and\n Cotterell, Ryan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.405/\",\n doi = \"10.18653/v1/2022.emnlp-main.405\",\n pages = \"6034--6055\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.405.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.405/", + "pdf_size": 411314, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17326995876526449768&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "Bar-Ilan University+Allen Institute for Artificial Intelligence; University of Cambridge; Bar-Ilan University+Allen Institute for Artificial Intelligence; ETH Z\u00fcrich", + "aff_domain": "gmail.com;cam.ac.uk;gmail.com;inf.ethz.ch", + "email": "gmail.com;cam.ac.uk;gmail.com;inf.ethz.ch", + "github": "https://github.com/shauli-ravfogel/adv-kernel-removal", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0+1;3", + "aff_unique_norm": "Bar-Ilan University;Allen Institute for Artificial Intelligence;University of Cambridge;ETH Z\u00fcrich", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.biu.ac.il;https://allenai.org;https://www.cam.ac.uk;https://www.ethz.ch", + "aff_unique_abbr": "BIU;AI2;Cambridge;ETHZ", 
+ "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0+1;2;0+1;3", + "aff_country_unique": "Israel;United States;United Kingdom;Switzerland" + }, + { + "id": "2022.emnlp-main.80", + "title": "Affective Idiosyncratic Responses to Music", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Affective responses to music are highly personal. Despite consensus that idiosyncratic factors play a key role in regulating how listeners emotionally respond to music, precisely measuring the marginal effects of these variables has proved challenging. To address this gap, we develop computational methods to measure affective responses to music from over 403M listener comments on a Chinese social music platform. Building on studies from music psychology in systematic and quasi-causal analyses, we test for musical, lyrical, contextual, demographic, and mental health effects that drive listener affective responses. Finally, motivated by the social phenomenon known as \u7f51\u6291\u4e91 (w\u01ceng-y\u00ec-y\u00fan), we identify influencing factors of platform user self-disclosures, the social support they receive, and notable differences in discloser user activity.", + "author": "Sky CH-Wang; Evan Li; Oliver Li; Smaranda Muresan; Zhou Yu", + "authorids": "/s/sky-ch-wang/; /e/evan-li/; /o/oliver-li/; /s/smaranda-muresan/; /z/zhou-yu/", + "bibtex": "@inproceedings{ch-wang-etal-2022-affective,\n title = \"Affective Idiosyncratic Responses to Music\",\n author = \"CH-Wang, Sky and\n Li, Evan and\n Li, Oliver and\n Muresan, Smaranda and\n Yu, Zhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.80/\",\n doi = \"10.18653/v1/2022.emnlp-main.80\",\n pages = \"1220--1250\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.80.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.80/", + "pdf_size": 3111483, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10626241356473588538&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science, Columbia University; Department of Computer Science, Columbia University; Department of Computer Science, Columbia University; Department of Computer Science, Columbia University + Data Science Institute, Columbia University; Data Science Institute, Columbia University", + "aff_domain": "cs.columbia.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu", + "email": "cs.columbia.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0+0;0", + "aff_unique_norm": "Columbia University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.columbia.edu", + "aff_unique_abbr": "Columbia", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.359", + "title": "Affective Knowledge Enhanced Multiple-Graph Fusion Networks for Aspect-based Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect-based sentiment analysis aims to identify sentiment polarity of social media users toward different aspects. 
Most recent methods adopt the aspect-centric latent tree to connect aspects and their corresponding opinion words, thinking that would facilitate establishing the relationship between aspects and opinion words.However, these methods ignore the roles of syntax dependency relation labels and affective semantic information in determining the sentiment polarity, resulting in the wrong prediction.In this paper, we propose a novel multi-graph fusion network (MGFN) based on latent graph to leverage the richer syntax dependency relation label information and affective semantic information of words.Specifically, we construct a novel syntax-aware latent graph (SaLG) to fully leverage the syntax dependency relation label information to facilitate the learning of sentiment representations. Subsequently, a multi-graph fusion module is proposed to fuse semantic information of surrounding contexts of aspects adaptively. Furthermore, we design an affective refinement strategy to guide the MGFN to capture significant affective clues. 
Extensive experiments on three datasets demonstrate that our MGFN model outperforms all state-of-the-art methods and verify the effectiveness of our model.", + "author": "Siyu Tang; Heyan Chai; Ziyi Yao; Ye Ding; Cuiyun Gao; Binxing Fang; Qing Liao", + "authorids": "/s/siyu-tang/; /h/heyan-chai/; /z/ziyi-yao/; /y/ye-ding/; /c/cuiyun-gao/; /b/binxing-fang/; /q/qing-liao/", + "bibtex": "@inproceedings{tang-etal-2022-affective,\n title = \"Affective Knowledge Enhanced Multiple-Graph Fusion Networks for Aspect-based Sentiment Analysis\",\n author = \"Tang, Siyu and\n Chai, Heyan and\n Yao, Ziyi and\n Ding, Ye and\n Gao, Cuiyun and\n Fang, Binxing and\n Liao, Qing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.359/\",\n doi = \"10.18653/v1/2022.emnlp-main.359\",\n pages = \"5352--5362\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.359.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.359/", + "pdf_size": 903454, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1414310653525012140&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Harbin Institute of Technology, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China; Dongguan University of Technology, China; Harbin Institute of Technology, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "stu.hit.edu.cn;stu.hit.edu.cn;stu.hit.edu.cn;dgut.edu.cn;hit.edu.cn;cae.cn;hit.edu.cn", + "email": 
"stu.hit.edu.cn;stu.hit.edu.cn;stu.hit.edu.cn;dgut.edu.cn;hit.edu.cn;cae.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;0+2;0+2", + "aff_unique_norm": "Harbin Institute of Technology;Dongguan University of Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "http://en.hhit.edu.cn/;http://www.dgut.edu.cn;", + "aff_unique_abbr": "HIT;;", + "aff_campus_unique_index": "0;0;0;0;0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.597", + "title": "AfriCLIRMatrix: Enabling Cross-Lingual Information Retrieval for African Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language diversity in NLP is critical in enabling the development of tools for a wide range of users.However, there are limited resources for building such tools for many languages, particularly those spoken in Africa.For search, most existing datasets feature few or no African languages, directly impacting researchers\u2019 ability to build and improve information access capabilities in those languages.Motivated by this, we created AfriCLIRMatrix, a test collection for cross-lingual information retrieval research in 15 diverse African languages.In total, our dataset contains 6 million queries in English and 23 million relevance judgments automatically mined from Wikipedia inter-language links, covering many more African languages than any existing information retrieval test collection.In addition, we release BM25, dense retrieval, and sparse\u2013dense hybrid baselines to provide a starting point for the development of future systems.We hope that these efforts can spur additional work in search for African languages.AfriCLIRMatrix can be downloaded at https://github.com/castorini/africlirmatrix.", + "author": "Odunayo Ogundepo; Xinyu Zhang; Shuo Sun; Kevin Duh; Jimmy Lin", + 
"authorids": "/o/odunayo-ogundepo/; /x/xinyu-zhang/; /s/shuo-sun/; /k/kevin-duh/; /j/jimmy-lin/", + "bibtex": "@inproceedings{ogundepo-etal-2022-africlirmatrix,\n title = \"{A}fri{CLIRM}atrix: Enabling Cross-Lingual Information Retrieval for {A}frican Languages\",\n author = \"Ogundepo, Odunayo and\n Zhang, Xinyu and\n Sun, Shuo and\n Duh, Kevin and\n Lin, Jimmy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.597/\",\n doi = \"10.18653/v1/2022.emnlp-main.597\",\n pages = \"8721--8728\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.597.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.597/", + "pdf_size": 201624, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6580169047890162246&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; John Hopkins University; John Hopkins University; David R. Cheriton School of Computer Science, University of Waterloo", + "aff_domain": "uwaterloo.ca;uwaterloo.ca;jhu.edu;cs.jhu.edu;uwaterloo.ca", + "email": "uwaterloo.ca;uwaterloo.ca;jhu.edu;cs.jhu.edu;uwaterloo.ca", + "github": "https://github.com/castorini/africlirmatrix", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;1;0", + "aff_unique_norm": "University of Waterloo;Johns Hopkins University", + "aff_unique_dep": "David R. 
Cheriton School of Computer Science;", + "aff_unique_url": "https://uwaterloo.ca;https://www.jhu.edu", + "aff_unique_abbr": "UWaterloo;JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1;0", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-main.128", + "title": "AfroLID: A Neural Language Identification Tool for African Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language identification (LID) is a crucial precursor for NLP, especially for mining web data. Problematically, most of the world\u2019s 7000+ languages today are not covered by LID technologies. We address this pressing issue for Africa by introducing AfroLID, a neural LID toolkit for 517 African languages and varieties. AfroLID exploits a multi-domain web dataset manually curated from across 14 language families utilizing five orthographic systems. When evaluated on our blind Test set, AfroLID achieves 95.89 F_1-score. We also compare AfroLID to five existing LID tools that each cover a small number of African languages, finding it to outperform them on most languages. We further show the utility of AfroLID in the wild by testing it on the acutely under-served Twitter domain. 
Finally, we offer a number of controlled case studies and perform a linguistically-motivated error analysis that allow us to both showcase AfroLID\u2019s powerful capabilities and limitations", + "author": "Ife Adebara; AbdelRahim Elmadany; Muhammad Abdul-Mageed; Alcides Inciarte", + "authorids": "/i/ife-adebara/; /a/abdelrahim-elmadany/; /m/muhammad-abdul-mageed/; /a/alcides-inciarte/", + "bibtex": "@inproceedings{adebara-etal-2022-afrolid,\n title = \"{A}fro{LID}: A Neural Language Identification Tool for {A}frican Languages\",\n author = \"Adebara, Ife and\n Elmadany, AbdelRahim and\n Abdul-Mageed, Muhammad and\n Inciarte, Alcides\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.128/\",\n doi = \"10.18653/v1/2022.emnlp-main.128\",\n pages = \"1958--1981\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.128.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.128/", + "pdf_size": 3200895, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11998438032371294771&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Deep Learning & Natural Language Processing Group, The University of British Columbia; Deep Learning & Natural Language Processing Group, The University of British Columbia; Deep Learning & Natural Language Processing Group, The University of British Columbia + Department of Natural Language Processing & Department of Machine Learning, MBZUAI; Deep Learning & Natural Language Processing Group, The University of British Columbia", + "aff_domain": "ubc.ca;ubc.ca;ubc.ca;mail.ubc.ca", + "email": "ubc.ca;ubc.ca;ubc.ca;mail.ubc.ca", + "github": 
"https://github.com/UBC-NLP/afrolid", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "The University of British Columbia;MBZUAI", + "aff_unique_dep": "Department of Computer Science;Department of Natural Language Processing", + "aff_unique_url": "https://www.ubc.ca;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "UBC;MBZUAI", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Vancouver;", + "aff_country_unique_index": "0;0;0+1;0", + "aff_country_unique": "Canada;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.795", + "title": "Agent-Specific Deontic Modality Detection in Legal Language", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Legal documents are typically long and written in legalese, which makes it particularly difficult for laypeople to understand their rights and duties. While natural language understanding technologies can be valuable in supporting such understanding in the legal domain, the limited availability of datasets annotated for deontic modalities in the legal domain, due to the cost of hiring experts and privacy issues, is a bottleneck. To this end, we introduce, LEXDEMOD, a corpus of English contracts annotatedwith deontic modality expressed with respect to a contracting party or agent along with the modal triggers. We benchmark this dataset on two tasks: (i) agent-specific multi-label deontic modality classification, and (ii) agent-specific deontic modality and trigger span detection using Transformer-based (Vaswani et al., 2017) language models. Transfer learning experiments show that the linguistic diversity of modal expressions in LEXDEMOD generalizes reasonably from lease to employment andrental agreements. A small case study indicates that a model trained on LEXDEMOD can detect red flags with high recall. 
We believe our work offers a new research direction for deontic modality detection in the legal domain.", + "author": "Abhilasha Sancheti; Aparna Garimella; Balaji Vasan Srinivasan; Rachel Rudinger", + "authorids": "/a/abhilasha-sancheti/; /a/aparna-garimella/; /b/balaji-vasan-srinivasan/; /r/rachel-rudinger/", + "bibtex": "@inproceedings{sancheti-etal-2022-agent,\n title = \"Agent-Specific Deontic Modality Detection in Legal Language\",\n author = \"Sancheti, Abhilasha and\n Garimella, Aparna and\n Srinivasan, Balaji Vasan and\n Rudinger, Rachel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.795/\",\n doi = \"10.18653/v1/2022.emnlp-main.795\",\n pages = \"11563--11579\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.795.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.795/", + "pdf_size": 3367913, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14192798918630211246&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of Maryland, College Park + Adobe Research; Adobe Research; Adobe Research; University of Maryland, College Park", + "aff_domain": "umd.edu;adobe.com;adobe.com;umd.edu", + "email": "umd.edu;adobe.com;adobe.com;umd.edu", + "github": "https://github.com/abhilashasancheti/LexDeMod", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0", + "aff_unique_norm": "University of Maryland;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www/umd.edu;https://research.adobe.com", + "aff_unique_abbr": "UMD;Adobe", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Park;", + "aff_country_unique_index": 
"0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.567", + "title": "Algorithms for Acyclic Weighted Finite-State Automata with Failure Arcs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Weighted finite-state automata (WSFAs) arecommonly used in NLP. Failure transitions area useful extension for compactly representingbackoffs or interpolation in n-gram modelsand CRFs, which are special cases of WFSAs.Unfortunately, applying standard algorithmsfor computing the pathsum requires expand-ing these compact failure transitions. As aresult, na \u0308\u0131ve computation of the pathsum inacyclic WFSAs with failure transitions runs inO(|Q|2|\u03a3|) (O(|Q||\u03a3|) for deterministic WF-SAs) while the equivalent algorithm in normalWFSAs runs in O(|E|), where E representsthe set of transitions, Q the set of states, and\u03a3 the alphabet. In this work, we present moreefficient algorithms for computing the pathsumin sparse acyclic WFSAs, i.e., WFSAs with av-erage out symbol fraction s \u226a 1. In those,backward runs in O(s|Q||\u03a3|). We proposean algorithm for semiring-weighted automatawhich runs in O(|E| + s|\u03a3||Q||Tmax| log |\u03a3|),where |Tmax| is the size of the largest con-nected component of failure transitions. Ad-ditionally, we propose faster algorithms fortwo specific cases. For ring-weighted WF-SAs we propose an algorithm with complex-ity O(|E| + s|\u03a3||Q||\u03c0max|), where |\u03c0max| de-notes the longest path length of failure transi-tions stemming from q and \u03a3(q) the set of sym-bols on the outgoing transitions from q. 
Forsemiring-weighted WFSAs whose failure tran-sition topology satisfies a condition exemplifiedby CRFs, we propose an algorithm with com-plexity O(|E| + s|\u03a3||Q| log |\u03a3|).", + "author": "Anej Svete; Benjamin Dayan; Ryan Cotterell; Tim Vieira; Jason Eisner", + "authorids": "/a/anej-svete/; /b/benjamin-dayan/; /r/ryan-cotterell/; /t/tim-vieira/; /j/jason-eisner/", + "bibtex": "@inproceedings{svete-etal-2022-algorithms,\n title = \"Algorithms for Acyclic Weighted Finite-State Automata with Failure Arcs\",\n author = \"Svete, Anej and\n Dayan, Benjamin and\n Cotterell, Ryan and\n Vieira, Tim and\n Eisner, Jason\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.567/\",\n doi = \"10.18653/v1/2022.emnlp-main.567\",\n pages = \"8289--8305\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.567.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.567/", + "pdf_size": 587333, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7139984426529045302&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "ETH Z\u00fcrich; ETH Z\u00fcrich; Johns Hopkins University; ETH Z\u00fcrich; Johns Hopkins University", + "aff_domain": "ethz.ch;ethz.ch;cs.jhu.edu;inf.ethz.ch;cs.jhu.edu", + "email": "ethz.ch;ethz.ch;cs.jhu.edu;inf.ethz.ch;cs.jhu.edu", + "github": "https://github.com/rycolab/failure-backward", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;1", + "aff_unique_norm": "ETH Z\u00fcrich;Johns Hopkins University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ethz.ch;https://www.jhu.edu", + "aff_unique_abbr": "ETHZ;JHU", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;1", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.emnlp-main.656", + "title": "Algorithms for Weighted Pushdown Automata", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Weighted pushdown automata (WPDAs) are at the core of many natural language processing tasks, like syntax-based statistical machine translation and transition-based dependency parsing. As most existing dynamic programming algorithms are designed for context-free grammars (CFGs), algorithms for PDAs often resort to a PDA-to-CFG conversion. In this paper, we develop novel algorithms that operate directly on WPDAs. Our algorithms are inspired by Lang\u2019s algorithm, but use a more general definition of pushdown automaton and either reduce the space requirements by a factor of |Gamma| (the size of the stack alphabet) or reduce the runtime by a factor of more than |Q| (the number of states). When run on the same class of PDAs as Lang\u2019s algorithm, our algorithm is both more space-efficient by a factor of |Gamma| and more time-efficient by a factor of |Q| x |Gamma|.", + "author": "Alexandra Butoi; Brian DuSell; Tim Vieira; Ryan Cotterell; David Chiang", + "authorids": "/a/alexandra-butoi/; /b/brian-dusell/; /t/tim-vieira/; /r/ryan-cotterell/; /d/david-chiang/", + "bibtex": "@inproceedings{butoi-etal-2022-algorithms,\n title = \"Algorithms for Weighted Pushdown Automata\",\n author = \"Butoi, Alexandra and\n DuSell, Brian and\n Vieira, Tim and\n Cotterell, Ryan and\n Chiang, David\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.656/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.656\",\n pages = \"9669--9680\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.656.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.656/", + "pdf_size": 324487, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9300015790361052922&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "ETH Z\u00fcrich; University of Notre Dame; Johns Hopkins University; ETH Z\u00fcrich; University of Notre Dame", + "aff_domain": "inf.ethz.ch;nd.edu;gmail.com;gmail.com;nd.edu", + "email": "inf.ethz.ch;nd.edu;gmail.com;gmail.com;nd.edu", + "github": "https://github.com/rycolab/wpda", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;1", + "aff_unique_norm": "ETH Z\u00fcrich;University of Notre Dame;Johns Hopkins University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ethz.ch;https://www.nd.edu;https://www.jhu.edu", + "aff_unique_abbr": "ETHZ;Notre Dame;JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;1", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.emnlp-main.36", + "title": "Aligning Recommendation and Conversation via Dual Imitation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Human conversations of recommendation naturally involve the shift of interests which can align the recommendation actions and conversation process to make accurate recommendations with rich explanations. However, existing conversational recommendation systems (CRS) ignore the advantage of user interest shift in connecting recommendation and conversation, which leads to an ineffective loose coupling structure of CRS. 
To address this issue, by modeling the recommendation actions as recommendation paths in a knowledge graph (KG), we propose DICR (Dual Imitation for Conversational Recommendation), which designs a dual imitation to explicitly align the recommendation paths and user interest shift paths in a recommendation module and a conversation module, respectively. By exchanging alignment signals, DICR achieves bidirectional promotion between recommendation and conversation modules and generates high-quality responses with accurate recommendations and coherent explanations. Experiments demonstrate that DICR outperforms the state-of-the-art models on recommendation and conversation performance with automatic, human, and novel explainability metrics.", + "author": "Jinfeng Zhou; Bo Wang; Minlie Huang; Dongming Zhao; Kun Huang; Ruifang He; Yuexian Hou", + "authorids": "/j/jinfeng-zhou/; /b/bo-wang/; /m/minlie-huang/; /d/dongming-zhao/; /k/kun-huang/; /r/ruifang-he/; /y/yuexian-hou/", + "bibtex": "@inproceedings{zhou-etal-2022-aligning,\n title = \"Aligning Recommendation and Conversation via Dual Imitation\",\n author = \"Zhou, Jinfeng and\n Wang, Bo and\n Huang, Minlie and\n Zhao, Dongming and\n Huang, Kun and\n He, Ruifang and\n Hou, Yuexian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.36/\",\n doi = \"10.18653/v1/2022.emnlp-main.36\",\n pages = \"549--561\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.36.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.36/", + "pdf_size": 682378, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7518434171275804760&as_sdt=5,24&sciodt=0,24&hl=en", + 
"gs_version_total": 3, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.findings-emnlp.168", + "title": "Alleviating Sparsity of Open Knowledge Graphs with Ternary Contrastive Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Sparsity of formal knowledge and roughness of non-ontological construction make sparsity problem particularly prominent in Open Knowledge Graphs (OpenKGs). Due to sparse links, learning effective representation for few-shot entities becomes difficult. We hypothesize that by introducing negative samples, a contrastive learning (CL) formulation could be beneficial in such scenarios. However, existing CL methods model KG triplets as binary objects of entities ignoring the relation-guided ternary propagation patterns and they are too generic, i.e., they ignore zero-shot, few-shot and synonymity problems that appear in OpenKGs. To address this, we propose TernaryCL, a CL framework based on ternary propagation patterns among head, relation and tail. TernaryCL designs Contrastive Entity and Contrastive Relation to mine ternary discriminative features with both negative entities and relations, introduces Contrastive Self to help zero- and few-shot entities learn discriminative features, Contrastive Synonym to model synonymous entities, and Contrastive Fusion to aggregate graph features from multiple paths. 
Extensive experiments on benchmarks demonstrate the superiority of TernaryCL over state-of-the-art models.", + "author": "Qian Li; Shafiq Joty; Daling Wang; Shi Feng; Yifei Zhang", + "authorids": "/q/qian-li/; /s/shafiq-joty/; /d/daling-wang/; /s/shi-feng/; /y/yifei-zhang/", + "bibtex": "@inproceedings{li-etal-2022-alleviating,\n title = \"Alleviating Sparsity of Open Knowledge Graphs with Ternary Contrastive Learning\",\n author = \"Li, Qian and\n Joty, Shafiq and\n Wang, Daling and\n Feng, Shi and\n Zhang, Yifei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.168/\",\n doi = \"10.18653/v1/2022.findings-emnlp.168\",\n pages = \"2279--2291\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.168.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.168/", + "pdf_size": 890907, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2022736870696199402&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Northeastern University, China+ Nanyang Technological University, Singapore; Nanyang Technological University, Singapore+ Salesforce Research; Northeastern University, China; Northeastern University, China; Northeastern University, China", + "aff_domain": "foxmail.com;ntu.edu.sg;cse.neu.edu.cn;cse.neu.edu.cn;cse.neu.edu.cn", + "email": "foxmail.com;ntu.edu.sg;cse.neu.edu.cn;cse.neu.edu.cn;cse.neu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1+2;0;0;0", + "aff_unique_norm": "Northeastern University;Nanyang Technological University;Salesforce", + "aff_unique_dep": ";;Salesforce Research", + "aff_unique_url": 
"http://www.neu.edu.cn/;https://www.ntu.edu.sg;https://research.salesforce.com", + "aff_unique_abbr": "NEU;NTU;Salesforce", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1+2;0;0;0", + "aff_country_unique": "China;Singapore;United States" + }, + { + "id": "2022.findings-emnlp.240", + "title": "AlphaTuning: Quantization-Aware Parameter-Efficient Adaptation of Large-Scale Pre-Trained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "There are growing interests in adapting large-scale language models using parameter-efficient fine-tuning methods. However, accelerating the model itself and achieving better inference efficiency through model compression has not been thoroughly explored yet.Model compression could provide the benefits of reducing memory footprints, enabling low-precision computations, and ultimately achieving cost-effective inference.To combine parameter-efficient adaptation and model compression, we propose AlphaTuning consisting of post-training quantization of the pre-trained language model and fine-tuning only some parts of quantized parameters for a target task.Specifically, AlphaTuning works by employing binary-coding quantization, which factorizes the full-precision parameters into binary parameters and a separate set of scaling factors.During the adaptation phase, the binary values are frozen for all tasks, while the scaling factors are fine-tuned for the downstream task.We demonstrate that AlphaTuning, when applied to GPT-2 and OPT, performs competitively with full fine-tuning on a variety of downstream tasks while achieving >10x compression ratio under 4-bit quantization and >1,000x reduction in the number of trainable parameters.", + "author": "Se Jung Kwon; Jeonghoon Kim; Jeongin Bae; Kang Min Yoo; Jin-Hwa Kim; Baeseong Park; Byeongwook Kim; Jung-Woo Ha; Nako Sung; Dongsoo Lee", + "authorids": "/s/se-jung-kwon/; /j/jeonghoon-kim/; /j/jeongin-bae/; 
/k/kang-min-yoo/; /j/jin-hwa-kim/; /b/baeseong-park/; /b/byeongwook-kim/; /j/jung-woo-ha/; /n/nako-sung/; /d/dongsoo-lee/", + "bibtex": "@inproceedings{kwon-etal-2022-alphatuning,\n title = \"{A}lpha{T}uning: Quantization-Aware Parameter-Efficient Adaptation of Large-Scale Pre-Trained Language Models\",\n author = \"Kwon, Se Jung and\n Kim, Jeonghoon and\n Bae, Jeongin and\n Yoo, Kang Min and\n Kim, Jin-Hwa and\n Park, Baeseong and\n Kim, Byeongwook and\n Ha, Jung-Woo and\n Sung, Nako and\n Lee, Dongsoo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.240/\",\n doi = \"10.18653/v1/2022.findings-emnlp.240\",\n pages = \"3288--3305\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.240.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.240/", + "pdf_size": 610648, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=713494232079091702&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "NAVER CLOVA+NAVER AI Lab+SNU AIIS; NAVER CLOVA+NAVER AI Lab+SNU AIIS; NAVER CLOVA+KAIST; NAVER CLOVA+NAVER AI Lab+SNU AIIS; NAVER AI Lab+SNU AIIS; NAVER CLOVA; NAVER CLOVA; NAVER AI Lab; NAVER CLOVA; NAVER CLOVA", + "aff_domain": "navercorp.com; ; ; ; ; ; ; ; ; ", + "email": "navercorp.com; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0+0+1;0+0+1;0+2;0+0+1;0+1;0;0;0;0;0", + "aff_unique_norm": "NAVER Corporation;Seoul National University;Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "CLOVA;Artificial Intelligence Institute;", + "aff_unique_url": "https://www.naver.com;http://aiis.snu.ac.kr;https://www.kaist.ac.kr", + "aff_unique_abbr": 
"NAVER;SNU;KAIST", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0;0+0+0;0+0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.493", + "title": "An Adaptive Logical Rule Embedding Model for Inductive Reasoning over Temporal Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Temporal knowledge graphs (TKGs) extrapolation reasoning predicts future events based on historical information, which has great research significance and broad application value. Existing methods can be divided into embedding-based methods and logical rule-based methods. Embedding-based methods rely on learned entity and relation embeddings to make predictions and thus lack interpretability. Logical rule-based methods bring scalability problems due to being limited by the learned logical rules. We combine the two methods to capture deep causal logic by learning rule embeddings, and propose an interpretable model for temporal knowledge graph reasoning called adaptive logical rule embedding model for inductive reasoning (ALRE-IR). ALRE-IR can adaptively extract and assess reasons contained in historical events, and make predictions based on causal logic. Furthermore, we propose a one-class augmented matching loss for optimization. When evaluated on the ICEWS14, ICEWS0515 and ICEWS18 datasets, the performance of ALRE-IR outperforms other state-of-the-art baselines. 
The results also demonstrate that ALRE-IR still shows outstanding performance when transferred to related dataset with common relation vocabulary, indicating our proposed model has good zero-shot reasoning ability.", + "author": "Xin Mei; Libin Yang; Xiaoyan Cai; Zuowei Jiang", + "authorids": "/x/xin-mei/; /l/libin-yang/; /x/xiaoyan-cai/; /z/zuowei-jiang/", + "bibtex": "@inproceedings{mei-etal-2022-adaptive,\n title = \"An Adaptive Logical Rule Embedding Model for Inductive Reasoning over Temporal Knowledge Graphs\",\n author = \"Mei, Xin and\n Yang, Libin and\n Cai, Xiaoyan and\n Jiang, Zuowei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.493/\",\n doi = \"10.18653/v1/2022.emnlp-main.493\",\n pages = \"7304--7316\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.493.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.493/", + "pdf_size": 331331, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13475206016702312836&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Northwestern Polytechnical University, Xi\u2019an, China; Northwestern Polytechnical University, Xi\u2019an, China; Northwestern Polytechnical University, Xi\u2019an, China; Northwestern Polytechnical University, Xi\u2019an, China", + "aff_domain": "mail.nwpu.edu.cn;nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "email": "mail.nwpu.edu.cn;nwpu.edu.cn;mail.nwpu.edu.cn;nwpu.edu.cn", + "github": "https://github.com/mxadorable/ALRET-IR", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Northwestern Polytechnical University", + "aff_unique_dep": "", + "aff_unique_url": 
"http://www.nwpu.edu.cn", + "aff_unique_abbr": "NWPU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Xi'an", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.362", + "title": "An Anchor-based Relative Position Embedding Method for Cross-Modal Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Position Embedding (PE) is essential for transformer to capture the sequence ordering of input tokens. Despite its general effectiveness verified in Natural Language Processing (NLP) and Computer Vision (CV), its application in cross-modal tasks remains unexplored and suffers from two challenges: 1) the input text tokens and image patches are not aligned, 2) the encoding space of each modality is different, making it unavailable for feature comparison. In this paper, we propose a unified position embedding method for these problems, called AnChor-basEd Relative Position Embedding (ACE-RPE), in which we first introduce an anchor locating mechanism to bridge the semantic gap and locate anchors from different modalities. Then we conduct the distance calculation of each text token and image patch by computing their shortest paths from the located anchors. Last, we embed the anchor-based distance to guide the computation of cross-attention. In this way, it calculates cross-modal relative position embedding for cross-modal transformer. 
Benefiting from ACE-RPE, our method obtains new SOTA results on a wide range of benchmarks, such as Image-Text Retrieval on MS-COCO and Flickr30K, Visual Entailment on SNLI-VE, Visual Reasoning on NLVR2 and Weakly-supervised Visual Grounding on RefCOCO+.", + "author": "Ya Wang; Xingwu Sun; Lian Fengzong; ZhanHui Kang; Chengzhong Xu Xu", + "authorids": "/y/ya-wang/; /x/xingwu-sun/; /l/lian-fengzong/; /z/zhanhui-kang/; /c/chengzhong-xu-xu/", + "bibtex": "@inproceedings{wang-etal-2022-anchor,\n title = \"An Anchor-based Relative Position Embedding Method for Cross-Modal Tasks\",\n author = \"Wang, Ya and\n Sun, Xingwu and\n Fengzong, Lian and\n Kang, ZhanHui and\n Xu, Chengzhong Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.362/\",\n doi = \"10.18653/v1/2022.emnlp-main.362\",\n pages = \"5401--5413\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.362.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.362/", + "pdf_size": 10890744, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2406615349708741903&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Machine Learning Platform Department, Tencent + State Key Lab of IOTSC, Department of Computer Science, University of Macau; Machine Learning Platform Department, Tencent + State Key Lab of IOTSC, Department of Computer Science, University of Macau; Machine Learning Platform Department, Tencent; Machine Learning Platform Department, Tencent; State Key Lab of IOTSC, Department of Computer Science, University of Macau", + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;um.edu.mo", + "email": 
"tencent.com;tencent.com;tencent.com;tencent.com;um.edu.mo", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0;0;1", + "aff_unique_norm": "Tencent;University of Macau", + "aff_unique_dep": "Machine Learning Platform Department;Department of Computer Science", + "aff_unique_url": "https://www.tencent.com;https://www.um.edu.mo", + "aff_unique_abbr": "Tencent;UM", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0;0;1", + "aff_country_unique": "China;Macau" + }, + { + "id": "2022.emnlp-main.346", + "title": "An Efficient Memory-Augmented Transformer for Knowledge-Intensive NLP Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Access to external knowledge is essential for many natural language processing tasks, such as question answering and dialogue. Existing methods often rely on a parametric model that stores knowledge in its parameters, or use a retrieval-augmented model that has access to an external knowledge source. Parametric and retrieval-augmented models have complementary strengths in terms of computational efficiency and predictive accuracy. To combine the strength of both approaches, we propose the Efficient Memory-Augmented Transformer (EMAT) \u2013 it encodes external knowledge into a key-value memory and exploits the fast maximum inner product search for memory querying. We also introduce pre-training tasks that allow EMAT to encode informative key-value representations, and to learn an implicit strategy to integrate multiple memory slots into the transformer. Experiments on various knowledge-intensive tasks such as question answering and dialogue datasets show that, simply augmenting parametric models (T5-base) using our method produces more accurate results (e.g., 25.8 \u2192 44.3 EM on NQ) while retaining a high throughput (e.g., 1000 queries/s on NQ). 
Compared to retrieval-augmented models, EMAT runs substantially faster across the board and produces more accurate results on WoW and ELI5.", + "author": "Yuxiang Wu; Yu Zhao; Baotian Hu; Pasquale Minervini; Pontus Stenetorp; Sebastian Riedel", + "authorids": "/y/yuxiang-wu/; /y/yu-zhao/; /b/baotian-hu/; /p/pasquale-minervini/; /p/pontus-stenetorp/; /s/sebastian-riedel/", + "bibtex": "@inproceedings{wu-etal-2022-efficient,\n title = \"An Efficient Memory-Augmented Transformer for Knowledge-Intensive {NLP} Tasks\",\n author = \"Wu, Yuxiang and\n Zhao, Yu and\n Hu, Baotian and\n Minervini, Pasquale and\n Stenetorp, Pontus and\n Riedel, Sebastian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.346/\",\n doi = \"10.18653/v1/2022.emnlp-main.346\",\n pages = \"5184--5196\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.346.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.346/", + "pdf_size": 425864, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=548262076963709819&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University College London, London, UK+University of Edinburgh, Edinburgh, UK; Harbin Institute of Technology, Shenzhen, PRC+University College London, London, UK; Harbin Institute of Technology, Shenzhen, PRC+University College London, London, UK; University of Edinburgh, Edinburgh, UK; University College London, London, UK; University College London, London, UK", + "aff_domain": "cs.ucl.ac.uk;hit.edu.cn;hit.edu.cn;ed.ac.uk;cs.ucl.ac.uk;cs.ucl.ac.uk", + "email": "cs.ucl.ac.uk;hit.edu.cn;hit.edu.cn;ed.ac.uk;cs.ucl.ac.uk;cs.ucl.ac.uk", + "github": 
"https://github.com/uclnlp/EMAT", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2+0;2+0;1;0;0", + "aff_unique_norm": "University College London;University of Edinburgh;Harbin Institute of Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucl.ac.uk;https://www.ed.ac.uk;http://en.hit.edu.cn/", + "aff_unique_abbr": "UCL;Edinburgh;HIT", + "aff_campus_unique_index": "0+1;2+0;2+0;1;0;0", + "aff_campus_unique": "London;Edinburgh;Shenzhen", + "aff_country_unique_index": "0+0;1+0;1+0;0;0;0", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "2022.emnlp-main.119", + "title": "An Empirical Analysis of Memorization in Fine-tuned Autoregressive Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large language models are shown to present privacy risks through memorization of training data, and several recent works have studied such risks for the pre-training phase. Little attention, however, has been given to the fine-tuning phase and it is not well understood how different fine-tuning methods (such as fine-tuning the full model, the model head, and adapter) compare in terms of memorization risk. This presents increasing concern as the \u201cpre-train and fine-tune\u201d paradigm proliferates. In this paper, we empirically study memorization of fine-tuning methods using membership inference and extraction attacks, and show that their susceptibility to attacks is very different. 
We observe that fine-tuning the head of the model has the highest susceptibility to attacks, whereas fine-tuning smaller adapters appears to be less vulnerable to known extraction attacks.", + "author": "Fatemehsadat Mireshghallah; Archit Uniyal; Tianhao Wang; David Evans; Taylor Berg-Kirkpatrick", + "authorids": "/f/fatemehsadat-mireshghallah/; /a/archit-uniyal/; /t/tianhao-wang/; /d/david-k-evans/; /t/taylor-berg-kirkpatrick/", + "bibtex": "@inproceedings{mireshghallah-etal-2022-empirical,\n title = \"An Empirical Analysis of Memorization in Fine-tuned Autoregressive Language Models\",\n author = \"Mireshghallah, Fatemehsadat and\n Uniyal, Archit and\n Wang, Tianhao and\n Evans, David and\n Berg-Kirkpatrick, Taylor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.119/\",\n doi = \"10.18653/v1/2022.emnlp-main.119\",\n pages = \"1816--1826\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.119.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.119/", + "pdf_size": 5398583, + "gs_citation": 88, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16823287953776072072&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "University of California San Diego; University of Virginia; University of Virginia; University of Virginia; University of California San Diego", + "aff_domain": "ucsd.edu;virginia.edu;virginia.edu;virginia.edu;ucsd.edu", + "email": "ucsd.edu;virginia.edu;virginia.edu;virginia.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "University of California, San Diego;University of Virginia", + "aff_unique_dep": ";", + 
"aff_unique_url": "https://ucsd.edu;https://www.virginia.edu", + "aff_unique_abbr": "UCSD;UVA", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "San Diego;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.684", + "title": "An Empirical Revisiting of Linguistic Knowledge Fusion in Language Understanding Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Though linguistic knowledge emerges during large-scale language model pretraining, recent work attempt to explicitly incorporate human-defined linguistic priors into task-specific fine-tuning. Infusing language models with syntactic or semantic knowledge from parsers has shown improvements on many language understanding tasks. To further investigate the effectiveness of structural linguistic priors, we conduct empirical study of replacing parsed graphs or trees with trivial ones (rarely carrying linguistic knowledge e.g., balanced tree) for tasks in the GLUE benchmark. Encoding with trivial graphs achieves competitive or even better performance in fully-supervised and few-shot settings. It reveals that the gains might not be significantly attributed to explicit linguistic priors but rather to more feature interactions brought by fusion layers. 
Hence we call for attention to using trivial graphs as necessary baselines to design advanced knowledge fusion methods in the future.", + "author": "Changlong Yu; Tianyi Xiao; Lingpeng Kong; Yangqiu Song; Wilfred Ng", + "authorids": "/c/changlong-yu/; /t/tianyi-xiao/; /l/lingpeng-kong/; /y/yangqiu-song/; /w/wilfred-ng/", + "bibtex": "@inproceedings{yu-etal-2022-empirical,\n title = \"An Empirical Revisiting of Linguistic Knowledge Fusion in Language Understanding Tasks\",\n author = \"Yu, Changlong and\n Xiao, Tianyi and\n Kong, Lingpeng and\n Song, Yangqiu and\n Ng, Wilfred\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.684/\",\n doi = \"10.18653/v1/2022.emnlp-main.684\",\n pages = \"10064--10070\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.684.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.684/", + "pdf_size": 243157, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12991874172424127494&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "HKUST, Hong Kong; HKUST, Hong Kong; The University of Hong Kong, Hong Kong; HKUST, Hong Kong; HKUST, Hong Kong", + "aff_domain": "cse.ust.hk;connect.ust.hk;cs.hku.hk;cse.ust.hk;cse.ust.hk", + "email": "cse.ust.hk;connect.ust.hk;cs.hku.hk;cse.ust.hk;cse.ust.hk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;The University of Hong Kong", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ust.hk;https://www.hku.hk", + "aff_unique_abbr": "HKUST;HKU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Clear 
Water Bay;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.264", + "title": "An Empirical Study on Finding Spans", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present an empirical study on methods for span finding, the selection of consecutive tokens in text for some downstream tasks. We focus on approaches that can be employed in training end-to-end information extraction systems, and find there is no definitive solution without considering task properties, and provide our observations to help with future design choices: 1) a tagging approach often yields higher precision while span enumeration and boundary prediction provide higher recall; 2) span type information can benefit a boundary prediction approach; 3) additional contextualization does not help span finding in most cases.", + "author": "Weiwei Gu; Boyuan Zheng; Yunmo Chen; Tongfei Chen; Benjamin Van Durme", + "authorids": "/w/weiwei-gu/; /b/boyuan-zheng/; /y/yunmo-chen/; /t/tongfei-chen/; /b/benjamin-van-durme/", + "bibtex": "@inproceedings{gu-etal-2022-empirical,\n title = \"An Empirical Study on Finding Spans\",\n author = \"Gu, Weiwei and\n Zheng, Boyuan and\n Chen, Yunmo and\n Chen, Tongfei and\n Van Durme, Benjamin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.264/\",\n doi = \"10.18653/v1/2022.emnlp-main.264\",\n pages = \"3976--3983\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.264.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.264/", + "pdf_size": 311263, + "gs_citation": 4, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=4303469292093383225&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Rochester; Johns Hopkins University; Johns Hopkins University; Microsoft Semantic Machines; Johns Hopkins University", + "aff_domain": "ur.rochester.edu;jhu.edu;jhu.edu;pm.me;jhu.edu", + "email": "ur.rochester.edu;jhu.edu;jhu.edu;pm.me;jhu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;1", + "aff_unique_norm": "University of Rochester;Johns Hopkins University;Microsoft", + "aff_unique_dep": ";;Semantic Machines", + "aff_unique_url": "https://www.rochester.edu;https://www.jhu.edu;https://www.microsoft.com", + "aff_unique_abbr": "U of R;JHU;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.726", + "title": "An Empirical Study on the Transferability of Transformer Modules in Parameter-efficient Fine-tuning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Parameter-efficient fine-tuning has garnered lots of attention in recent studies.On this subject, we investigate the capability of different transformer modules in transferring knowledge from a pre-trained model to a downstream task. Our empirical results suggest that every transformer module is a winning ticket such that fine-tuning the specific module while the rest of the network is frozen achieves a comparable performance to the full fine-tuning case. 
Among different modules in LMs, LayerNorms exhibit a significant capacity for transfer learning to the extent that with only 0.003% updateable parameters in the layer-wise analysis, they can show acceptable performance on various target tasks.We argue that the performance of LayerNorms could be attributed to their high-magnitude weights compared to other components in a pre-trained model.", + "author": "Mohammad AkbarTajari; Sara Rajaee; Mohammad Taher Pilehvar", + "authorids": "/m/mohammad-akbartajari/; /s/sara-rajaee/; /m/mohammad-taher-pilehvar/", + "bibtex": "@inproceedings{akbartajari-etal-2022-empirical,\n title = \"An Empirical Study on the Transferability of Transformer Modules in Parameter-efficient Fine-tuning\",\n author = \"AkbarTajari, Mohammad and\n Rajaee, Sara and\n Pilehvar, Mohammad Taher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.726/\",\n doi = \"10.18653/v1/2022.emnlp-main.726\",\n pages = \"10617--10625\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.726.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.726/", + "pdf_size": 875525, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=143924670382569100&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff": "Sharif University of Technology, Iran; University of Amsterdam, Netherlands; Tehran Institute for Advanced Studies + Khatam University, Iran", + "aff_domain": "gmail.com;uva.nl;cam.ac.uk", + "email": "gmail.com;uva.nl;cam.ac.uk", + "github": "https://github.com/m-tajari/transformer-transferability", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "Sharif 
University of Technology;University of Amsterdam;Tehran Institute for Advanced Studies;Khatam University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.sharif.edu;https://www.uva.nl;http://www.tias.ir;http://www.kut.ac.ir", + "aff_unique_abbr": "SUT;UvA;;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0+0", + "aff_country_unique": "Iran;Netherlands" + }, + { + "id": "2022.findings-emnlp.278", + "title": "An Error-Guided Correction Model for Chinese Spelling Error Correction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Although existing neural network approaches have achieved great progress on Chinese spelling correction, there is still room to improve. The model is required to avoid over-correction and to distinguish a correct token from its phonological and visual similar ones. In this paper, we propose an error-guided correction model to address these issues. By borrowing the powerful ability of the pre-trained BERT model, we propose a novel zero-shot error detection method to do a preliminary detection, which guides our model to attend more on the probably wrong tokens in encoding and to avoid modifying the correct tokens in generating. Furthermore, we introduce a new loss function to integrate the error confusion set, which enables our model to distinguish similar tokens. Moreover, our model supports highly parallel decoding to meet real applications. Experiments are conducted on widely used benchmarks. 
Our model achieves superior performance against state-of-the-art approaches by a remarkable margin, on both the quality and computation speed.", + "author": "Rui Sun; Xiuyu Wu; Yunfang Wu", + "authorids": "/r/rui-sun/; /x/xiuyu-wu/; /y/yunfang-wu/", + "bibtex": "@inproceedings{sun-etal-2022-error,\n title = \"An Error-Guided Correction Model for {C}hinese Spelling Error Correction\",\n author = \"Sun, Rui and\n Wu, Xiuyu and\n Wu, Yunfang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.278/\",\n doi = \"10.18653/v1/2022.findings-emnlp.278\",\n pages = \"3800--3810\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.278.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.278/", + "pdf_size": 1207178, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12630826955582046975&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, Beijing, China+School of Software and Microelectronics, Peking University, Beijing, China; MOE Key Laboratory of Computational Linguistics, Peking University, Beijing, China+School of Software and Microelectronics, Peking University, Beijing, China; School of Computer Science, Peking University, Beijing, China", + "aff_domain": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + 
"aff_campus_unique_index": "0+0;0+0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.722", + "title": "An Unsupervised, Geometric and Syntax-aware Quantification of Polysemy", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Polysemy is the phenomenon where a single word form possesses two or more related senses. It is an extremely ubiquitous part of natural language and analyzing it has sparked rich discussions in the linguistics, psychology and philosophy communities alike. With scarce attention paid to polysemy in computational linguistics, and even scarcer attention toward quantifying polysemy, in this paper, we propose a novel, unsupervised framework to compute and estimate polysemy scores for words in multiple languages. We infuse our proposed quantification with syntactic knowledge in the form of dependency structures. This informs the final polysemy scores of the lexicon motivated by recent linguistic findings that suggest there is an implicit relation between syntax and ambiguity/polysemy. We adopt a graph based approach by computing the discrete Ollivier Ricci curvature on a graph of the contextual nearest neighbors. We test our framework on curated datasets controlling for different sense distributions of words in 3 typologically diverse languages - English, French and Spanish. The effectiveness of our framework is demonstrated by significant correlations of our quantification with expert human annotated language resources like WordNet. We observe a 0.3 point increase in the correlation coefficient as compared to previous quantification studies in English. 
Our research leverages contextual language models and syntactic structures to empirically support the widely held theoretical linguistic notion that syntax is intricately linked to ambiguity/polysemy.", + "author": "Anmol Goel; Charu Sharma; Ponnurangam Kumaraguru", + "authorids": "/a/anmol-goel/; /c/charu-sharma/; /p/ponnurangam-kumaraguru/", + "bibtex": "@inproceedings{goel-etal-2022-unsupervised,\n title = \"An Unsupervised, Geometric and Syntax-aware Quantification of Polysemy\",\n author = \"Goel, Anmol and\n Sharma, Charu and\n Kumaraguru, Ponnurangam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.722/\",\n doi = \"10.18653/v1/2022.emnlp-main.722\",\n pages = \"10565--10574\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.722.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.722/", + "pdf_size": 626466, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4213592919686830047&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "International Institute of Information Technology, Hyderabad; International Institute of Information Technology, Hyderabad; International Institute of Information Technology, Hyderabad", + "aff_domain": "research.iiit.ac.in;iiit.ac.in;iiit.ac.in", + "email": "research.iiit.ac.in;iiit.ac.in;iiit.ac.in", + "github": "https://github.com/agoel00/polysemy", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "International Institute of Information Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iiit.ac.in", + "aff_unique_abbr": "IIIT Hyderabad", + "aff_campus_unique_index": "0;0;0", + 
"aff_campus_unique": "Hyderabad", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.643", + "title": "Analogical Math Word Problems Solving with Enhanced Problem-Solution Association", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Math word problem (MWP) solving is an important task in question answering which requires human-like reasoning ability. Analogical reasoning has long been used in mathematical education, as it enables students to apply common relational structures of mathematical situations to solve new problems. In this paper, we propose to build a novel MWP solver by leveraging analogical MWPs, which advance the solver\u2019s generalization ability across different kinds of MWPs. The key idea, named analogy identification, is to associate the analogical MWP pairs in a latent space, i.e., encoding an MWP close to another analogical MWP, while leaving away from the non-analogical ones. Moreover, a solution discriminator is integrated into the MWP solver to enhance the association between an MWP and its true solution. The evaluation results verify that our proposed analogical learning strategy promotes the performance of MWP-BERT on Math23k over the state-of-the-art model Generate2Rank, with 5 times fewer parameters in the encoder. 
We also find that our model has a stronger generalization ability in solving difficult MWPs due to the analogical learning from easy MWPs.", + "author": "Zhenwen Liang; Jipeng Zhang; Xiangliang Zhang", + "authorids": "/z/zhenwen-liang/; /j/jipeng-zhang/; /x/xiangliang-zhang/", + "bibtex": "@inproceedings{liang-etal-2022-analogical,\n title = \"Analogical Math Word Problems Solving with Enhanced Problem-Solution Association\",\n author = \"Liang, Zhenwen and\n Zhang, Jipeng and\n Zhang, Xiangliang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.643/\",\n doi = \"10.18653/v1/2022.emnlp-main.643\",\n pages = \"9454--9464\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.643.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.643/", + "pdf_size": 566977, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16603001959581383056&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Notre Dame; Hong Kong University of Science and Technology; University of Notre Dame", + "aff_domain": "nd.edu;conect.ust.hk;nd.edu", + "email": "nd.edu;conect.ust.hk;nd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Notre Dame;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nd.edu;https://www.ust.hk", + "aff_unique_abbr": "Notre Dame;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.325", + "title": "Analyzing and Evaluating 
Faithfulness in Dialogue Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dialogue summarization is abstractive in nature, making it suffer from factual errors. The factual correctness of summaries has the highest priority before practical applications. Many efforts have been made to improve faithfulness in text summarization. However, there is a lack of systematic study on dialogue summarization systems. In this work, we first perform the fine-grained human analysis on the faithfulness of dialogue summaries and observe that over 35% of generated summaries are faithfully inconsistent respective the source dialogues. Furthermore, we present a new model-level faithfulness evaluation method. It examines generation models with multi-choice questions created by rule-based transformations. Experimental results show that our evaluation schema is a strong proxy for the factual correctness of summarization models. The human-annotated faithfulness samples and the evaluation toolkit are released to facilitate future research toward faithful dialogue summarization.", + "author": "Bin Wang; Chen Zhang; Yan Zhang; Yiming Chen; Haizhou Li", + "authorids": "/b/bin-wang/; /c/chen-zhang/; /y/yan-zhang/; /y/yiming-chen/; /h/haizhou-li/", + "bibtex": "@inproceedings{wang-etal-2022-analyzing,\n title = \"Analyzing and Evaluating Faithfulness in Dialogue Summarization\",\n author = \"Wang, Bin and\n Zhang, Chen and\n Zhang, Yan and\n Chen, Yiming and\n Li, Haizhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.325/\",\n doi = \"10.18653/v1/2022.emnlp-main.325\",\n pages = \"4897--4908\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.325.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.325/", + "pdf_size": 946397, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10260694017570386265&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "National University of Singapore, Singapore; National University of Singapore, Singapore; National University of Singapore, Singapore; National University of Singapore, Singapore; The Chinese University of Hong Kong, Shenzhen, China + Kriston AI, China", + "aff_domain": "gmail.com; ; ; ; ", + "email": "gmail.com; ; ; ; ", + "github": "https://github.com/BinWang28/FacEval", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1+2", + "aff_unique_norm": "National University of Singapore;The Chinese University of Hong Kong;Kriston AI", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.cuhk.edu.cn;", + "aff_unique_abbr": "NUS;CUHK;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;1+1", + "aff_country_unique": "Singapore;China" + }, + { + "id": "2022.findings-emnlp.545", + "title": "Analyzing the Limits of Self-Supervision in Handling Bias in Language", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompting inputs with natural language task descriptions has emerged as a popular mechanism to elicit reasonably accurate outputs from large-scale generative language models with little to no in-context supervision. This also helps gain insight into how well language models capture the semantics of a wide range of downstream tasks purely from self-supervised pre-training on massive corpora of unlabeled text. Such models have naturally also been exposed to a lot of undesirable content like racist and sexist language and there is only some work on awareness of models along these dimensions. 
In this paper, we define and comprehensively evaluate how well such language models capture the semantics of four tasks for bias: diagnosis, identification, extraction and rephrasing. We define three broad classes of task descriptions for these tasks: statement, question, and completion, with numerous lexical variants within each class. We study the efficacy of prompting for each task using these classes and the null task description across several decoding methods and few-shot examples. Our analyses indicate that language models are capable of performing these tasks to widely varying degrees across different bias dimensions, such as gender and political affiliation. We believe our work is an important step towards unbiased language models by quantifying the limits of current self-supervision objectives at accomplishing such sociologically challenging tasks.", + "author": "Lisa Bauer; Karthik Gopalakrishnan; Spandana Gella; Yang Liu; Mohit Bansal; Dilek Hakkani-Tur", + "authorids": "/l/lisa-bauer/; /k/karthik-gopalakrishnan/; /s/spandana-gella/; /y/yang-liu/; /m/mohit-bansal/; /d/dilek-hakkani-tur/", + "bibtex": "@inproceedings{bauer-etal-2022-analyzing,\n title = \"Analyzing the Limits of Self-Supervision in Handling Bias in Language\",\n author = \"Bauer, Lisa and\n Gopalakrishnan, Karthik and\n Gella, Spandana and\n Liu, Yang and\n Bansal, Mohit and\n Hakkani-Tur, Dilek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.545/\",\n doi = \"10.18653/v1/2022.findings-emnlp.545\",\n pages = \"7372--7386\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.545.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.545/", + "pdf_size": 330998, + 
"gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6187585015485158257&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 8, + "aff": "UNC Chapel Hill+Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; UNC Chapel Hill; Amazon Alexa AI", + "aff_domain": "cs.unc.edu;amazon.com;amazon.com;amazon.com;cs.unc.edu;amazon.com", + "email": "cs.unc.edu;amazon.com;amazon.com;amazon.com;cs.unc.edu;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;1;0;1", + "aff_unique_norm": "University of North Carolina at Chapel Hill;Amazon", + "aff_unique_dep": ";Alexa AI", + "aff_unique_url": "https://www.unc.edu;https://www.amazon.com", + "aff_unique_abbr": "UNC;Amazon", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chapel Hill;", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.234", + "title": "Analyzing the Mono- and Cross-Lingual Pretraining Dynamics of Multilingual Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The emergent cross-lingual transfer seen in multilingual pretrained models has sparked significant interest in studying their behavior. However, because these analyses have focused on fully trained multilingual models, little is known about the dynamics of the multilingual pretraining process. We investigate when these models acquire their in-language and cross-lingual abilities by probing checkpoints taken from throughout XLM-R pretraining, using a suite of linguistic tasks. Our analysis shows that the model achieves high in-language performance early on, with lower-level linguistic skills acquired before more complex ones. In contrast, the point in pretraining when the model learns to transfer cross-lingually differs across language pairs. 
Interestingly, we also observe that, across many languages and tasks, the final model layer exhibits significant performance degradation over time, while linguistic knowledge propagates to lower layers of the network. Taken together, these insights highlight the complexity of multilingual pretraining and the resulting varied behavior for different languages over time.", + "author": "Terra Blevins; Hila Gonen; Luke Zettlemoyer", + "authorids": "/t/terra-blevins/; /h/hila-gonen/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{blevins-etal-2022-analyzing,\n title = \"Analyzing the Mono- and Cross-Lingual Pretraining Dynamics of Multilingual Language Models\",\n author = \"Blevins, Terra and\n Gonen, Hila and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.234/\",\n doi = \"10.18653/v1/2022.emnlp-main.234\",\n pages = \"3575--3590\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.234.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.234/", + "pdf_size": 6443574, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16837586337738044237&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington + Meta AI Research; Paul G. Allen School of Computer Science & Engineering, University of Washington + Meta AI Research; Paul G. 
Allen School of Computer Science & Engineering, University of Washington + Meta AI Research", + "aff_domain": "cs.washington.edu;gmail.com;cs.washington.edu", + "email": "cs.washington.edu;gmail.com;cs.washington.edu", + "github": "", + "project": "https://nlp.cs.washington.edu/xlmr-across-time", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "University of Washington;Meta Platforms, Inc.", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;Meta AI Research", + "aff_unique_url": "https://www.washington.edu;https://meta.com", + "aff_unique_abbr": "UW;Meta AI", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.457", + "title": "Answer Quality Aware Aggregation for Extractive QA Crowdsourcing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Quality control is essential for creating extractive question answering (EQA) datasets via crowdsourcing. Aggregation across answers, i.e. word spans within passages annotated, by different crowd workers is one major focus for ensuring its quality. However, crowd workers cannot reach a consensus on a considerable portion of questions. We introduce a simple yet effective answer aggregation method that takes into account the relations among the answer, question, and context passage. We evaluate answer quality from both the view of question answering model to determine how confident the QA model is about each answer and the view of the answer verification model to determine whether the answer is correct. Then we compute aggregation scores with each answer\u2019s quality and its contextual embedding produced by pre-trained language models. 
The experiments on a large real crowdsourced EQA dataset show that our framework outperforms baselines by around 16% on precision and effectively conduct answer aggregation for extractive QA task.", + "author": "Peide Zhu; Zhen Wang; Claudia Hauff; Jie Yang; Avishek Anand", + "authorids": "/p/peide-zhu/; /z/zhen-wang/; /c/claudia-hauff/; /j/jie-yang/; /a/avishek-anand/", + "bibtex": "@inproceedings{zhu-etal-2022-answer,\n title = \"Answer Quality Aware Aggregation for Extractive {QA} Crowdsourcing\",\n author = \"Zhu, Peide and\n Wang, Zhen and\n Hauff, Claudia and\n Yang, Jie and\n Anand, Avishek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.457/\",\n doi = \"10.18653/v1/2022.findings-emnlp.457\",\n pages = \"6147--6159\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.457.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.457/", + "pdf_size": 633872, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10573703884041917786&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Delft University of Technology; Delft University of Technology; Delft University of Technology; Delft University of Technology; Delft University of Technology", + "aff_domain": "tudelft.nl;student.tudelft.nl;tudelft.nl;tudelft.nl;tudelft.nl", + "email": "tudelft.nl;student.tudelft.nl;tudelft.nl;tudelft.nl;tudelft.nl", + "github": "https://github.com/zpeide/Answer-Quality-Aware-Aggregation", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Delft University of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tudelft.nl", + "aff_unique_abbr": "TUDelft", 
+ "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.emnlp-main.666", + "title": "Are All Spurious Features in Natural Language Alike? An Analysis through a Causal Lens", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The term \u2018spurious correlations\u2019 has been used in NLP to informally denote any undesirable feature-label correlations. However, a correlation can be undesirable because (i) the feature is irrelevant to the label (e.g. punctuation in a review), or (ii) the feature\u2019s effect on the label depends on the context (e.g. negation words in a review), which is ubiquitous in language tasks. In case (i), we want the model to be invariant to the feature, which is neither necessary nor sufficient for prediction. But in case (ii), even an ideal model (e.g. humans) must rely on the feature, since it is necessary (but not sufficient) for prediction. Therefore, a more fine-grained treatment of spurious features is needed to specify the desired model behavior. We formalize this distinction using a causal model and probabilities of necessity and sufficiency, which delineates the causal relations between a feature and a label. We then show that this distinction helps explain results of existing debiasing methods on different spurious features, and demystifies surprising results such as the encoding of spurious features in model representations after debiasing.", + "author": "Nitish Joshi; Xiang Pan; He He", + "authorids": "/n/nitish-joshi/; /x/xiang-pan/; /h/he-he/", + "bibtex": "@inproceedings{joshi-etal-2022-spurious,\n title = \"Are All Spurious Features in Natural Language Alike? 
An Analysis through a Causal Lens\",\n author = \"Joshi, Nitish and\n Pan, Xiang and\n He, He\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.666/\",\n doi = \"10.18653/v1/2022.emnlp-main.666\",\n pages = \"9804--9817\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.666.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.666/", + "pdf_size": 404615, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15047160850122331247&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, New York University + Center for Data Science, New York University; Department of Computer Science, New York University + Center for Data Science, New York University; Department of Computer Science, New York University + Center for Data Science, New York University", + "aff_domain": "nyu.edu;nyu.edu;nyu.edu", + "email": "nyu.edu;nyu.edu;nyu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "New York", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.137", + "title": "Are Hard Examples also Harder to Explain? 
A Study with Human and Model-Generated Explanations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work on explainable NLP has shown that few-shot prompting can enable large pre-trained language models (LLMs) to generate grammatical and factual natural language explanations for data labels. In this work, we study the connection between explainability and sample hardness by investigating the following research question \u2013 \u201cAre LLMs and humans equally good at explaining data labels for both easy and hard samples?\u201d We answer this question by first collecting human-written explanations in the form of generalizable commonsense rules on the task of Winograd Schema Challenge (Winogrande dataset). We compare these explanations with those generated by GPT-3 while varying the hardness of the test samples as well as the in-context samples. We observe that (1) GPT-3 explanations are as grammatical as human explanations regardless of the hardness of the test samples, (2) for easy examples, GPT-3 generates highly supportive explanations but human explanations are more generalizable, and (3) for hard examples, human explanations are significantly better than GPT-3 explanations both in terms of label-supportiveness and generalizability judgements. We also find that hardness of the in-context examples impacts the quality of GPT-3 explanations. Finally, we show that the supportiveness and generalizability aspects of human explanations are also impacted by sample hardness, although by a much smaller margin than models.", + "author": "Swarnadeep Saha; Peter Hase; Nazneen Rajani; Mohit Bansal", + "authorids": "/s/swarnadeep-saha/; /p/peter-hase/; /n/nazneen-rajani/; /m/mohit-bansal/", + "bibtex": "@inproceedings{saha-etal-2022-hard,\n title = \"Are Hard Examples also Harder to Explain? 
A Study with Human and Model-Generated Explanations\",\n author = \"Saha, Swarnadeep and\n Hase, Peter and\n Rajani, Nazneen and\n Bansal, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.137/\",\n doi = \"10.18653/v1/2022.emnlp-main.137\",\n pages = \"2121--2131\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.137.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.137/", + "pdf_size": 1673004, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17357789258464478100&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "UNC Chapel Hill; UNC Chapel Hill; Hugging Face; UNC Chapel Hill", + "aff_domain": "cs.unc.edu;cs.unc.edu;huggingface.co;cs.unc.edu", + "email": "cs.unc.edu;cs.unc.edu;huggingface.co;cs.unc.edu", + "github": "https://github.com/swarnaHub/ExplanationHardness", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of North Carolina at Chapel Hill;Hugging Face", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unc.edu;https://huggingface.co", + "aff_unique_abbr": "UNC;Hugging Face", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chapel Hill;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.148", + "title": "Are Large Pre-Trained Language Models Leaking Your Personal Information?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Are Large Pre-Trained Language Models Leaking Your Personal Information? 
In this paper, we analyze whether Pre-Trained Language Models (PLMs) are prone to leaking personal information. Specifically, we query PLMs for email addresses with contexts of the email address or prompts containing the owner\u2019s name. We find that PLMs do leak personal information due to memorization. However, since the models are weak at association, the risk of specific personal information being extracted by attackers is low. We hope this work could help the community to better understand the privacy risk of PLMs and bring new insights to make PLMs safe.", + "author": "Jie Huang; Hanyin Shao; Kevin Chen-Chuan Chang", + "authorids": "/j/jie-huang/; /h/hanyin-shao/; /k/kevin-chen-chuan-chang/", + "bibtex": "@inproceedings{huang-etal-2022-large,\n title = \"Are Large Pre-Trained Language Models Leaking Your Personal Information?\",\n author = \"Huang, Jie and\n Shao, Hanyin and\n Chang, Kevin Chen-Chuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.148/\",\n doi = \"10.18653/v1/2022.findings-emnlp.148\",\n pages = \"2038--2047\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.148.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.148/", + "pdf_size": 496461, + "gs_citation": 191, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3394928924025709717&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu", + "github": 
"https://github.com/jeffhj/LM_PersonalInfoLeak", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.390", + "title": "Are Neural Topic Models Broken?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, the relationship between automated and human evaluation of topic models has been called into question. Method developers have staked the efficacy of new topic model variants on automated measures, and their failure to approximate human preferences places these models on uncertain ground. Moreover, existing evaluation paradigms are often divorced from real-world use.Motivated by content analysis as a dominant real-world use case for topic modeling, we analyze two related aspects of topic models that affect their effectiveness and trustworthiness in practice for that purpose: the stability of their estimates and the extent to which the model\u2019s discovered categories align with human-determined categories in the data. We find that neural topic models fare worse in both respects compared to an established classical method. 
We take a step toward addressing both issues in tandem by demonstrating that a straightforward ensembling method can reliably outperform the members of the ensemble.", + "author": "Alexander Miserlis Hoyle; Rupak Sarkar; Pranav Goel; Philip Resnik", + "authorids": "/a/alexander-miserlis-hoyle/; /r/rupak-sarkar/; /p/pranav-goel/; /p/philip-resnik/", + "bibtex": "@inproceedings{hoyle-etal-2022-neural,\n title = \"Are Neural Topic Models Broken?\",\n author = \"Hoyle, Alexander Miserlis and\n Sarkar, Rupak and\n Goel, Pranav and\n Resnik, Philip\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.390/\",\n doi = \"10.18653/v1/2022.findings-emnlp.390\",\n pages = \"5321--5344\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.390.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.390/", + "pdf_size": 602570, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6122573898126114782&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Computer Science, University of Maryland; Computer Science, University of Maryland; Computer Science, University of Maryland; UMIACS, Linguistics, University of Maryland", + "aff_domain": "cs.umd.edu;cs.umd.edu;cs.umd.edu;cs.umd.edu", + "email": "cs.umd.edu;cs.umd.edu;cs.umd.edu;cs.umd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + 
}, + { + "id": "2022.emnlp-main.617", + "title": "Are representations built from the ground up? An empirical examination of local composition in language models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Compositionality, the phenomenon where the meaning of a phrase can be derived from its constituent parts, is a hallmark of human language. At the same time, many phrases are non-compositional, carrying a meaning beyond that of each part in isolation. Representing both of these types of phrases is critical for language understanding, but it is an open question whether modern language models (LMs) learn to do so; in this work we examine this question. We first formulate a problem of predicting the LM-internal representations of longer phrases given those of their constituents. We find that the representation of a parent phrase can be predicted with some accuracy given an affine transformation of its children. While we would expect the predictive accuracy to correlate with human judgments of semantic compositionality, we find this is largely not the case, indicating that LMs may not accurately distinguish between compositional and non-compositional phrases. We perform a variety of analyses, shedding light on when different varieties of LMs do and do not generate compositional representations, and discuss implications for future modeling work.", + "author": "Emmy Liu; Graham Neubig", + "authorids": "/e/emmy-liu/; /g/graham-neubig/", + "bibtex": "@inproceedings{liu-neubig-2022-representations,\n title = \"Are representations built from the ground up? 
An empirical examination of local composition in language models\",\n author = \"Liu, Emmy and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.617/\",\n doi = \"10.18653/v1/2022.emnlp-main.617\",\n pages = \"9053--9073\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.617.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.617/", + "pdf_size": 3244385, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15655991647691585346&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/nightingal3/lm-compositionality", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.609", + "title": "Argument Mining for Review Helpfulness Prediction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The importance of reliably determining the helpfulness of product reviews is rising as both helpful and unhelpful reviews continue to accumulate on e-commerce websites. 
And argumentational features\u2014such as the structure of arguments and the types of underlying elementary units\u2014have shown to be promising indicators of product review helpfulness. However, their adoption has been limited due to the lack of sufficient resources and large-scale experiments investigating their utility. To this end, we present the AMazon Argument Mining (AM2) corpus\u2014a corpus of 878 Amazon reviews on headphones annotated according to a theoretical argumentation model designed to evaluate argument quality.Experiments show that employing argumentational features leads to statistically significant improvements over the state-of-the-art review helpfulness predictors under both text-only and text-and-image settings.", + "author": "Zaiqian Chen; Daniel Verdi do Amarante; Jenna Donaldson; Yohan Jo; Joonsuk Park", + "authorids": "/z/zaiqian-chen/; /d/daniel-verdi-do-amarante/; /j/jenna-donaldson/; /y/yohan-jo/; /j/joonsuk-park/", + "bibtex": "@inproceedings{chen-etal-2022-argument,\n title = \"Argument Mining for Review Helpfulness Prediction\",\n author = \"Chen, Zaiqian and\n Verdi do Amarante, Daniel and\n Donaldson, Jenna and\n Jo, Yohan and\n Park, Joonsuk\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.609/\",\n doi = \"10.18653/v1/2022.emnlp-main.609\",\n pages = \"8914--8922\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.609.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.609/", + "pdf_size": 328238, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=957507513136488591&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": ";;;;", + "aff_domain": ";;;;", 
+ "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.600", + "title": "ArtELingo: A Million Emotion Annotations of WikiArt with Emphasis on Diversity over Language and Culture", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper introduces ArtELingo, a new benchmark and dataset, designed to encourage work on diversity across languages and cultures. Following ArtEmis, a collection of 80k artworks from WikiArt with 0.45M emotion labels and English-only captions, ArtELingo adds another 0.79M annotations in Arabic and Chinese, plus 4.8K in Spanish to evaluate \u201ccultural-transfer\u201d performance. 51K artworks have 5 annotations or more in 3 languages. This diversity makes it possible to study similarities and differences across languages and cultures. Further, we investigate captioning tasks, and find diversity improves the performance of baseline models. ArtELingo is publicly available at \u2018www.artelingo.org\u2018 with standard splits and baseline models. 
We hope our work will help ease future research on multilinguality and culturally-aware AI.", + "author": "Youssef Mohamed; Mohamed Abdelfattah; Shyma Alhuwaider; Feifan Li; Xiangliang Zhang; Kenneth Church; Mohamed Elhoseiny", + "authorids": "/y/youssef-mohamed/; /m/mohamed-abdelfattah/; /s/shyma-alhuwaider/; /f/feifan-li/; /x/xiangliang-zhang/; /k/kenneth-church/; /m/mohamed-elhoseiny/", + "bibtex": "@inproceedings{mohamed-etal-2022-artelingo,\n title = \"{A}rt{EL}ingo: A Million Emotion Annotations of {W}iki{A}rt with Emphasis on Diversity over Language and Culture\",\n author = \"Mohamed, Youssef and\n Abdelfattah, Mohamed and\n Alhuwaider, Shyma and\n Li, Feifan and\n Zhang, Xiangliang and\n Church, Kenneth and\n Elhoseiny, Mohamed\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.600/\",\n doi = \"10.18653/v1/2022.emnlp-main.600\",\n pages = \"8770--8785\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.600.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.600/", + "pdf_size": 9164974, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12462471869088783534&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "KAUST; KAUST; KAUST; KAUST; University of Notre Dame; Northeastern University; KAUST", + "aff_domain": "kaust.edu.sa;kaust.edu.sa;kaust.edu.sa;kaust.edu.sa;nd.edu;northeastern.edu;kaust.edu.sa", + "email": "kaust.edu.sa;kaust.edu.sa;kaust.edu.sa;kaust.edu.sa;nd.edu;northeastern.edu;kaust.edu.sa", + "github": "", + "project": "www.artelingo.org", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;0", + "aff_unique_norm": "King Abdullah University of Science and 
Technology;University of Notre Dame;Northeastern University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.nd.edu;https://www.northeastern.edu", + "aff_unique_abbr": "KAUST;Notre Dame;NEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;1;0", + "aff_country_unique": "Saudi Arabia;United States" + }, + { + "id": "2022.emnlp-industry.9", + "title": "Ask-and-Verify: Span Candidate Generation and Verification for Attribute Value Extraction", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "The product attribute value extraction (AVE) task aims to capture key factual information from product profiles, and is useful for several downstream applications in e-Commerce platforms. Previous contributions usually formulate this task using sequence labeling or reading comprehension architectures. However, sequence labeling models tend to be conservative in their predictions resulting in a high false negative rate. Existing reading comprehension formulations, on the other hand, can over-generate attribute values which hinders precision. In the present work we address these limitations with a new end-to-end pipeline framework called Ask-and-Verify. Given a product and an attribute query, the Ask step detects the top-K span candidates (i.e. possible attribute values) from the product profiles, then the Verify step filters out false positive candidates. We evaluate Ask-and-Verify model on Amazon\u2019s product pages and AliExpress public dataset, and present a comparative analysis as well as a detailed ablation study. 
Despite its simplicity, we show that Ask-and-Verify outperforms recent state-of-the-art models by up to 3.1% F1 absolute improvement points, while also scaling to thousands of attributes.", + "author": "Yifan Ding; Yan Liang; Nasser Zalmout; Xian Li; Christan Grant; Tim Weninger", + "authorids": "/y/yifan-ding/; /y/yan-liang/; /n/nasser-zalmout/; /x/xian-li/; /c/christan-grant/; /t/tim-weninger/", + "bibtex": "@inproceedings{ding-etal-2022-ask,\n title = \"Ask-and-Verify: Span Candidate Generation and Verification for Attribute Value Extraction\",\n author = \"Ding, Yifan and\n Liang, Yan and\n Zalmout, Nasser and\n Li, Xian and\n Grant, Christan and\n Weninger, Tim\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.9/\",\n doi = \"10.18653/v1/2022.emnlp-industry.9\",\n pages = \"110--110\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.9.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.9/", + "pdf_size": 2026843, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6984911498186942213&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Notre Dame1; Amazon.com2; Amazon.com2; Amazon.com2; University of Oklahoma3; University of Notre Dame1", + "aff_domain": "nd.edu;amazon.com;amazon.com;amazon.com;ou.edu;nd.edu", + "email": "nd.edu;amazon.com;amazon.com;amazon.com;ou.edu;nd.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;2;0", + "aff_unique_norm": "University of Notre Dame;Amazon.com;University of Oklahoma", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nd.edu;https://www.amazon.com;https://www.ou.edu", + "aff_unique_abbr": 
"Notre Dame;Amazon;OU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.172", + "title": "Assessing Non-autoregressive Alignment in Neural Machine Translation via Word Reordering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent work on non-autoregressive neural machine translation (NAT) that leverages alignment information to explicitly reduce the modality of target distribution has reported comparable performance with counterparts that tackle multi-modality problem by implicitly modeling dependencies. Effectiveness in handling alignment is vital for models that follow this approach, where a token reordering mechanism is typically involved and plays a vital role. We review the reordering capability of the respective mechanisms in recent NAT models, and our experimental results show that their performance is sub-optimal. We propose to learn a non-autoregressive language model (NALM) based on transformer which can be combined with Viterbi decoding to achieve better reordering performance. We evaluate the proposed NALM using the PTB dataset where sentences with words permuted in different ways are expected to have their ordering recovered. Our empirical results show that the proposed method can outperform the state-of-the-art reordering mechanisms under different word permutation settings, with a 2-27 BLEU improvement, suggesting high potential for word alignment in NAT.", + "author": "Chun-Hin Tse; Ester Leung; William K. 
Cheung", + "authorids": "/c/chun-hin-tse/; /e/ester-leung/; /w/william-k-cheung/", + "bibtex": "@inproceedings{tse-etal-2022-assessing,\n title = \"Assessing Non-autoregressive Alignment in Neural Machine Translation via Word Reordering\",\n author = \"Tse, Chun-Hin and\n Leung, Ester and\n Cheung, William K.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.172/\",\n doi = \"10.18653/v1/2022.findings-emnlp.172\",\n pages = \"2327--2333\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.172.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.172/", + "pdf_size": 394572, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:PwzZy5gfyUsJ:scholar.google.com/&scioq=Assessing+Non-autoregressive+Alignment+in+Neural+Machine+Translation+via+Word+Reordering&hl=en&as_sdt=0,44", + "gs_version_total": 0, + "aff": "Dept. of TIIS, Hong Kong Baptist University; Asia Institute, The University of Melbourne; Dept. of Computer Science, Hong Kong Baptist University", + "aff_domain": "comp.hkbu.edu.hk;unimelb.edu.au;comp.hkbu.edu.hk", + "email": "comp.hkbu.edu.hk;unimelb.edu.au;comp.hkbu.edu.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Hong Kong Baptist University;The University of Melbourne", + "aff_unique_dep": "Dept. 
of TIIS;Asia Institute", + "aff_unique_url": "https://www.hkbu.edu.hk;https://www.unimelb.edu.au", + "aff_unique_abbr": "HKBU;UniMelb", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Melbourne;Hong Kong", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.468", + "title": "Assist Non-native Viewers: Multimodal Cross-Lingual Summarization for How2 Videos", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal summarization for videos aims to generate summaries from multi-source information (videos, audio transcripts), which has achieved promising progress. However, existing works are restricted to monolingual video scenarios, ignoring the demands of non-native video viewers to understand the cross-language videos in practical applications. It stimulates us to propose a new task, named Multimodal Cross-Lingual Summarization for videos (MCLS), which aims to generate cross-lingual summaries from multimodal inputs of videos. First, to make it applicable to MCLS scenarios, we conduct a Video-guided Dual Fusion network (VDF) that integrates multimodal and cross-lingual information via diverse fusion strategies at both encoder and decoder. Moreover, to alleviate the problem of high annotation costs and limited resources in MCLS, we propose a triple-stage training framework to assist MCLS by transferring the knowledge from monolingual multimodal summarization data, which includes: 1) multimodal summarization on sufficient prevalent language videos with a VDF model; 2) knowledge distillation (KD) guided adjustment on bilingual transcripts; 3) multimodal summarization for cross-lingual videos with a KD induced VDF model. 
Experiment results on the reorganized How2 dataset show that the VDF model alone outperforms previous methods for multimodal summarization, and the performance further improves by a large margin via the proposed triple-stage training framework.", + "author": "Nayu Liu; Kaiwen Wei; Xian Sun; Hongfeng Yu; Fanglong Yao; Li Jin; Guo Zhi; Guangluan Xu", + "authorids": "/n/nayu-liu/; /k/kaiwen-wei/; /x/xian-sun/; /h/hongfeng-yu/; /f/fanglong-yao/; /l/li-jin/; /g/guo-zhi/; /g/guangluan-xu/", + "bibtex": "@inproceedings{liu-etal-2022-assist,\n title = \"Assist Non-native Viewers: Multimodal Cross-Lingual Summarization for How2 Videos\",\n author = \"Liu, Nayu and\n Wei, Kaiwen and\n Sun, Xian and\n Yu, Hongfeng and\n Yao, Fanglong and\n Jin, Li and\n Zhi, Guo and\n Xu, Guangluan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.468/\",\n doi = \"10.18653/v1/2022.emnlp-main.468\",\n pages = \"6959--6969\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.468.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.468/", + "pdf_size": 747669, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5419430749162870673&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of 
Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences+School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences; Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences", + "aff_domain": "mails.ucas.ac.cn;mails.ucas.ac.cn; ; ;mails.ucas.ac.cn; ; ; ", + "email": "mails.ucas.ac.cn;mails.ucas.ac.cn; ; ;mails.ucas.ac.cn; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0;0;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Key Laboratory of Network Information System Technology;School of Electronic, Electrical and Communication Engineering", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.24", + "title": "AssistSR: Task-oriented Video Segment Retrieval for Personal AI Assistant", + "track": "main", + "status": "finding", + "award": false, + "abstract": "It is still a pipe dream that personal AI assistants on the phone and AR glasses can assist our daily life in addressing our questions like \u201chow to 
adjust the date for this watch?\u201d and \u201chow to set its heating duration? (while pointing at an oven)\u201d. The queries used in conventional tasks (i.e. Video Question Answering, Video Retrieval, Moment Localization) are often factoid and based on pure text. In contrast, we present a new task called Task-oriented Question-driven Video Segment Retrieval (TQVSR). Each of our questions is an image-box-text query that focuses on affordance of items in our daily life and expects relevant answer segments to be retrieved from a corpus of instructional video-transcript segments. To support the study of this TQVSR task, we construct a new dataset called AssistSR. We design novel guidelines to create high-quality samples. This dataset contains 3.2k multimodal questions on 1.6k video segments from instructional videos on diverse daily-used items. To address TQVSR, we develop a simple yet effective model called Dual Multimodal Encoders (DME) that significantly outperforms several baseline methods while still having large room for improvement in the future. Moreover, we present detailed ablation analyses. 
Code and data are available at https://github.com/StanLei52/TQVSR.", + "author": "Weixian Lei; Difei Gao; Yuxuan Wang; Dongxing Mao; Zihan Liang; Lingmin Ran; Mike Zheng Shou", + "authorids": "/w/weixian-lei/; /d/difei-gao/; /y/yuxuan-wang/; /d/dongxing-mao/; /z/zihan-liang/; /l/lingmin-ran/; /m/mike-zheng-shou/", + "bibtex": "@inproceedings{lei-etal-2022-assistsr,\n title = \"{A}ssist{SR}: Task-oriented Video Segment Retrieval for Personal {AI} Assistant\",\n author = \"Lei, Weixian and\n Gao, Difei and\n Wang, Yuxuan and\n Mao, Dongxing and\n Liang, Zihan and\n Ran, Lingmin and\n Shou, Mike Zheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.24/\",\n doi = \"10.18653/v1/2022.findings-emnlp.24\",\n pages = \"319--338\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.24.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.24/", + "pdf_size": 7575925, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2065633905427370915&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore; Show Lab, National University of Singapore", + "aff_domain": "nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg", + "email": "nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg;nus.edu.sg", + "github": "https://github.com/StanLei52/TQVSR", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + 
"aff_unique_norm": "National University of Singapore", + "aff_unique_dep": "Show Lab", + "aff_unique_url": "https://www.nus.edu.sg", + "aff_unique_abbr": "NUS", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Singapore", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.findings-emnlp.151", + "title": "Assisting the Human Fact-Checkers: Detecting All Previously Fact-Checked Claims in a Document", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Given the recent proliferation of false claims online, there has been a lot of manual fact-checking effort. As this is very time-consuming, human fact-checkers can benefit from tools that can support them and make them more efficient. Here, we focus on building a system that could provide such support. Given an input document, it aims to detect all sentences that contain a claim that can be verified by some previously fact-checked claims (from a given database). The output is a re-ranked list of the document sentences, so that those that can be verified are ranked as high as possible, together with corresponding evidence. Unlike previous work, which has looked into claim retrieval, here we take a document-level perspective. We create a new manually annotated dataset for the task, and we propose suitable evaluation measures. We further experiment with a learning-to-rank approach, achieving sizable performance gains over several strong baselines. Our analysis demonstrates the importance of modeling text similarity and stance, while also taking into account the veracity of the retrieved previously fact-checked claims. 
We believe that this research would be of interest to fact-checkers, journalists, media, and regulatory authorities.", + "author": "Shaden Shaar; Nikola Georgiev; Firoj Alam; Giovanni Da San Martino; Aisha Mohamed; Preslav Nakov", + "authorids": "/s/shaden-shaar/; /n/nikola-georgiev/; /f/firoj-alam/; /g/giovanni-da-san-martino/; /a/aisha-mohamed/; /p/preslav-nakov/", + "bibtex": "@inproceedings{shaar-etal-2022-assisting,\n title = \"Assisting the Human Fact-Checkers: Detecting All Previously Fact-Checked Claims in a Document\",\n author = \"Shaar, Shaden and\n Georgiev, Nikola and\n Alam, Firoj and\n Da San Martino, Giovanni and\n Mohamed, Aisha and\n Nakov, Preslav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.151/\",\n doi = \"10.18653/v1/2022.findings-emnlp.151\",\n pages = \"2069--2080\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.151.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.151/", + "pdf_size": 333078, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16207476857548682096&as_sdt=805&sciodt=0,3&hl=en", + "gs_version_total": 7, + "aff": "Cornell University; Sofia University; Qatar Computing Research Institute, HBKU; University of Padova; University of Wisconsin-Madison; Mohamed bin Zayed University of Artificial Intelligence", + "aff_domain": "gmail.com;gmail.com;hbku.edu.qa;math.unipd.it;wisc.edu;mbzuai.ac.ae", + "email": "gmail.com;gmail.com;hbku.edu.qa;math.unipd.it;wisc.edu;mbzuai.ac.ae", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;5", + "aff_unique_norm": "Cornell University;Sofia University;Qatar Computing Research 
Institute;University of Padova;University of Wisconsin-Madison;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.cornell.edu;https://www.sofiauni.bg/en/;https://www.qcri.org;https://www.unipd.it;https://www.wisc.edu;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "Cornell;Sofia U;QCRI;UNIPD;UW-Madison;MBZUAI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Madison", + "aff_country_unique_index": "0;1;2;3;0;4", + "aff_country_unique": "United States;Bulgaria;Qatar;Italy;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.436", + "title": "Attention and Edge-Label Guided Graph Convolutional Networks for Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "It has been shown that named entity recognition (NER) could benefit from incorporating the long-distance structured information captured by dependency trees. However, dependency trees built by tools usually have a certain percentage of errors. Under such circumstances, how to better use relevant structured information while ignoring irrelevant or wrong structured information from the dependency trees to improve NER performance is still a challenging research problem. In this paper, we propose the Attention and Edge-Label guided Graph Convolution Network (AELGCN) model. Then, we integrate it into BiLSTM-CRF to form BiLSTM-AELGCN-CRF model. We design an edge-aware node joint update module and introduce a node-aware edge update module to explore hidden in structured information entirely and solve the wrong dependency label information to some extent. After two modules, we apply attention-guided GCN, which automatically learns how to attend to the relevant structured information selectively. We conduct extensive experiments on several standard datasets across four languages and achieve better results than previous approaches. 
Through experimental analysis, it is found that our proposed model can better exploit the structured information on the dependency tree to improve the recognition of long entities.", + "author": "Renjie Zhou; Zhongyi Xie; Jian Wan; Jilin Zhang; Yong Liao; Qiang Liu", + "authorids": "/r/renjie-zhou/; /z/zhongyi-xie/; /j/jian-wan/; /j/jilin-zhang/; /y/yong-liao/; /q/qiang-liu/", + "bibtex": "@inproceedings{zhou-etal-2022-attention,\n title = \"Attention and Edge-Label Guided Graph Convolutional Networks for Named Entity Recognition\",\n author = \"Zhou, Renjie and\n Xie, Zhongyi and\n Wan, Jian and\n Zhang, Jilin and\n Liao, Yong and\n Liu, Qiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.436/\",\n doi = \"10.18653/v1/2022.emnlp-main.436\",\n pages = \"6499--6510\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.436.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.436/", + "pdf_size": 1458513, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7595210810963398262&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 2, + "aff": "Hangzhou Dianzi University; Hangzhou Dianzi University; Zhejiang University of Science and Technology; Hangzhou Dianzi University; Univerisity of Science and Technology of China; Zhejiang Police College", + "aff_domain": "hdu.edu.cn;hdu.edu.cn; ; ; ; ", + "email": "hdu.edu.cn;hdu.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;3", + "aff_unique_norm": "Hangzhou Dianzi University;Zhejiang University of Science and Technology;University of Science and Technology of China;Zhejiang Police College", + 
"aff_unique_dep": ";;;", + "aff_unique_url": "http://www.hdu.edu.cn/;http://www.zjust.edu.cn;http://www.ustc.edu.cn;", + "aff_unique_abbr": "HGHDU;ZUST;USTC;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.330", + "title": "Attention weights accurately predict language representations in the brain", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In Transformer-based language models (LMs) the attention mechanism converts token embeddings into contextual embeddings that incorporate information from neighboring words. The resulting contextual hidden state embeddings have enabled highly accurate models of brain responses, suggesting that the attention mechanism constructs contextual embeddings that carry information reflected in language-related brain representations. However, it is unclear whether the attention weights that are used to integrate information across words are themselves related to language representations in the brain. To address this question we analyzed functional magnetic resonance imaging (fMRI) recordings of participants reading English language narratives. We provided the narrative text as input to two LMs (BERT and GPT-2) and extracted their corresponding attention weights. We then used encoding models to determine how well attention weights can predict recorded brain responses. We find that attention weights accurately predict brain responses in much of the frontal and temporal cortices. Our results suggest that the attention mechanism itself carries information that is reflected in brain representations. 
Moreover, these results indicate cortical areas in which context integration may occur.", + "author": "Mathis Lamarre; Catherine Chen; Fatma Deniz", + "authorids": "/m/mathis-lamarre/; /c/catherine-chen-ucberkley/; /f/fatma-deniz/", + "bibtex": "@inproceedings{lamarre-etal-2022-attention,\n title = \"Attention weights accurately predict language representations in the brain\",\n author = \"Lamarre, Mathis and\n Chen, Catherine and\n Deniz, Fatma\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.330/\",\n doi = \"10.18653/v1/2022.findings-emnlp.330\",\n pages = \"4513--4529\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.330.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.330/", + "pdf_size": 19768132, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8417899669670753438&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Institute of Software Engineering and Theoretical Computer Science, Technische Universit\u00e4t Berlin + Bernstein Center for Computational Neuroscience, Berlin; Department of Electrical Engineering and Computer Sciences, University of California, Berkeley; Bernstein Center for Computational Neuroscience, Berlin + Helen Wills Neuroscience Institute, University of California, Berkeley", + "aff_domain": "tu-berlin.de;berkeley.edu;berkeley.edu", + "email": "tu-berlin.de;berkeley.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;1+2", + "aff_unique_norm": "Technische Universit\u00e4t Berlin;Bernstein Center for Computational Neuroscience;University of California, Berkeley", + "aff_unique_dep": "Institute of Software 
Engineering and Theoretical Computer Science;Computational Neuroscience;Department of Electrical Engineering and Computer Sciences", + "aff_unique_url": "https://www.tu-berlin.de;;https://www.berkeley.edu", + "aff_unique_abbr": "TU Berlin;;UC Berkeley", + "aff_campus_unique_index": "0+0;1;0+1", + "aff_campus_unique": "Berlin;Berkeley", + "aff_country_unique_index": "0+0;1;0+1", + "aff_country_unique": "Germany;United States" + }, + { + "id": "2022.findings-emnlp.138", + "title": "Audience-Centric Natural Language Generation via Style Infusion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Adopting contextually appropriate, audience-tailored linguistic styles is critical to the success of user-centric language generation systems (e.g., chatbots, computer-aided writing, dialog systems). While existing approaches demonstrate text style transfer (TST) with large volumes of parallel or non-parallel data, we argue that grounding style on audience-independent external factors is innately limiting for two reasons. First, it is difficult to collect large volumes of audience-specific stylistic data. Second, some stylistic objectives (e.g., persuasiveness, memorability, empathy) are hard to define without audience feedback. In this paper, we propose the novel task of style infusion - infusing the stylistic preferences of audiences in pretrained language generation models. Since humans are better at pairwise comparisons than direct scoring - i.e., is Sample-A more persuasive/polite/empathic than Sample-B - we leverage limited pairwise human judgments to bootstrap a style analysis model and augment our seed set of judgments. We then infuse the learned textual style in a GPT-2 based text generator while balancing fluency and style adoption. With quantitative and qualitative assessments, we show that our infusion approach can generate compelling stylized examples with generic text prompts. 
We make the anonymized code and data accessible.", + "author": "Samraj Moorjani; Adit Krishnan; Hari Sundaram; Ewa Maslowska; Aravind Sankar", + "authorids": "/s/samraj-moorjani/; /a/adit-krishnan/; /h/hari-sundaram/; /e/ewa-maslowska/; /a/aravind-sankar/", + "bibtex": "@inproceedings{moorjani-etal-2022-audience,\n title = \"Audience-Centric Natural Language Generation via Style Infusion\",\n author = \"Moorjani, Samraj and\n Krishnan, Adit and\n Sundaram, Hari and\n Maslowska, Ewa and\n Sankar, Aravind\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.138/\",\n doi = \"10.18653/v1/2022.findings-emnlp.138\",\n pages = \"1919--1932\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.138.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.138/", + "pdf_size": 358763, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7384820420119197664&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "https://github.com/CrowdDynamicsLab/StyleInfusion", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": 
"0;0;0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.411", + "title": "Augmenting Multi-Turn Text-to-SQL Datasets with Self-Play", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The task of context-dependent text-to-SQL aims to convert multi-turn user utterances to formal SQL queries. This is a challenging task due to both the scarcity of training data from which to learn complex contextual dependencies and to generalize to unseen databases. In this paper we explore augmenting the training datasets using self-play, which leverages contextual information to synthesize new interactions to adapt the model to new databases. We first design a SQL-to-text model conditioned on a sampled goal query, which represents a user\u2019s intent, that then converses with a text-to-SQL semantic parser to generate new interactions. We then filter the synthesized interactions and retrain the models with the augmented data. We find that self-play improves the accuracy of a strong baseline on SParC and CoSQL, two widely used cross-domain text-to-SQL datasets. 
Our analysis shows that self-play simulates various conversational thematic relations, enhances cross-domain generalization and improves beam-search.", + "author": "Qi Liu; Zihuiwen Ye; Tao Yu; Linfeng Song; Phil Blunsom", + "authorids": "/q/qi-liu/; /z/zihuiwen-ye/; /t/tao-yu/; /l/linfeng-song/; /p/phil-blunsom/", + "bibtex": "@inproceedings{liu-etal-2022-augmenting-multi,\n title = \"Augmenting Multi-Turn Text-to-{SQL} Datasets with Self-Play\",\n author = \"Liu, Qi and\n Ye, Zihuiwen and\n Yu, Tao and\n Song, Linfeng and\n Blunsom, Phil\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.411/\",\n doi = \"10.18653/v1/2022.findings-emnlp.411\",\n pages = \"5608--5620\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.411.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.411/", + "pdf_size": 481674, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8493427273638919893&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "The University of Hong Kong; University of Oxford; The University of Hong Kong; University of Oxford; Tencent AI Lab, Bellevue, WA, USA", + "aff_domain": "cs.hku.hk;cs.ox.ac.uk;cs.hku.hk;cs.ox.ac.uk;tencent.com", + "email": "cs.hku.hk;cs.ox.ac.uk;cs.hku.hk;cs.ox.ac.uk;tencent.com", + "github": "https://github.com/leuchine/self_play_picard", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;2", + "aff_unique_norm": "The University of Hong Kong;University of Oxford;Tencent", + "aff_unique_dep": ";;AI Lab", + "aff_unique_url": "https://www.hku.hk;https://www.ox.ac.uk;https://ai.tencent.com", + "aff_unique_abbr": "HKU;Oxford;Tencent AI Lab", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Bellevue", + "aff_country_unique_index": "0;1;0;1;2", + "aff_country_unique": "China;United Kingdom;United States" + }, + { + "id": "2022.emnlp-industry.4", + "title": "Augmenting Operations Research with Auto-Formulation of Optimization Models From Problem Descriptions", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "We describe an augmented intelligence system for simplifying and enhancing the modeling experience for operations research. Using this system, the user receives a suggested formulation of an optimization problem based on its description. To facilitate this process, we build an intuitive user interface system that enables the users to validate and edit the suggestions. We investigate controlled generation techniques to obtain an automatic suggestion of formulation. Then, we evaluate their effectiveness with a newly created dataset of linear programming problems drawn from various application domains.", + "author": "Rindra Ramamonjison; Haley Li; Timothy Yu; Shiqi He; Vishnu Rengan; Amin Banitalebi-dehkordi; Zirui Zhou; Yong Zhang", + "authorids": "/r/rindra-ramamonjison/; /h/haley-li/; /t/timothy-yu/; /s/shiqi-he/; /v/vishnu-rengan/; /a/amin-banitalebi-dehkordi/; /z/zirui-zhou/; /y/yong-zhang/", + "bibtex": "@inproceedings{ramamonjison-etal-2022-augmenting,\n title = \"Augmenting Operations Research with Auto-Formulation of Optimization Models From Problem Descriptions\",\n author = \"Ramamonjison, Rindra and\n Li, Haley and\n Yu, Timothy and\n He, Shiqi and\n Rengan, Vishnu and\n Banitalebi-dehkordi, Amin and\n Zhou, Zirui and\n Zhang, Yong\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-industry.4/\",\n doi = \"10.18653/v1/2022.emnlp-industry.4\",\n pages = \"29--62\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.4.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.4/", + "pdf_size": 1757583, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18140702885023901924&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 4, + "aff": "Huawei Technologies Canada; University of British Columbia, Vancouver; Huawei Technologies Canada; University of British Columbia, Vancouver; University of British Columbia, Vancouver; Huawei Technologies Canada; Huawei Technologies Canada; Huawei Technologies Canada", + "aff_domain": "huawei.com; ; ; ; ; ; ; ", + "email": "huawei.com; ; ; ; ; ; ; ", + "github": "https://github.com/nl4opt/nl4opt-competition", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;1;1;0;0;0", + "aff_unique_norm": "Huawei Technologies;University of British Columbia", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.huawei.com/ca-en/;https://www.ubc.ca", + "aff_unique_abbr": "Huawei;UBC", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Vancouver", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.findings-emnlp.170", + "title": "AutoCAD: Automatically Generate Counterfactuals for Mitigating Shortcut Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent studies have shown the impressive efficacy of counterfactually augmented data (CAD) for reducing NLU models\u2019 reliance on spurious features and improving their generalizability. However, current methods still heavily rely on human efforts or task-specific designs to generate counterfactuals, thereby impeding CAD\u2019s applicability to a broad range of NLU tasks. In this paper, we present AutoCAD, a fully automatic and task-agnostic CAD generation framework. 
AutoCAD first leverages a classifier to unsupervisedly identify rationales as spans to be intervened, which disentangles spurious and causal features. Then, AutoCAD performs controllable generation enhanced by unlikelihood training to produce diverse counterfactuals. Extensive evaluations on multiple out-of-domain and challenge benchmarks demonstrate that AutoCAD consistently and significantly boosts the out-of-distribution performance of powerful pre-trained models across different NLU tasks, which is comparable or even better than previous state-of-the-art human-in-the-loop or task-specific CAD methods.", + "author": "Jiaxin Wen; Yeshuang Zhu; Jinchao Zhang; Jie Zhou; Minlie Huang", + "authorids": "/j/jiaxin-wen/; /y/yeshuang-zhu/; /j/jinchao-zhang/; /j/jie-zhou/; /m/minlie-huang/", + "bibtex": "@inproceedings{wen-etal-2022-autocad,\n title = \"{A}uto{CAD}: Automatically Generate Counterfactuals for Mitigating Shortcut Learning\",\n author = \"Wen, Jiaxin and\n Zhu, Yeshuang and\n Zhang, Jinchao and\n Zhou, Jie and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.170/\",\n doi = \"10.18653/v1/2022.findings-emnlp.170\",\n pages = \"2302--2317\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.170.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.170/", + "pdf_size": 410962, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10846153915294272803&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The CoAI group, Tsinghua University, Beijing, China+Department of Computer Science and Technology, Tsinghua University, Beijing, China; Pattern Recognition Center, WeChat AI, 
Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; The CoAI group, Tsinghua University, Beijing, China+Department of Computer Science and Technology, Tsinghua University, Beijing, China", + "aff_domain": "mails.tsinghua.edu.cn;tencent.com;tencent.com;tencent.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;tencent.com;tencent.com;tencent.com;tsinghua.edu.cn", + "github": "https://github.com/thu-coai/AutoCAD", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;1;1;0+0", + "aff_unique_norm": "Tsinghua University;Tencent Inc", + "aff_unique_dep": "The CoAI group;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "THU;Tencent", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.647", + "title": "Automatic Document Selection for Efficient Encoder Pretraining", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Building pretrained language models is considered expensive and data-intensive, but must we increase dataset size to achieve better performance? We propose an alternative to larger training sets by automatically identifying smaller yet domain-representative subsets. We extend Cynical Data Selection, a statistical sentence scoring method that conditions on a representative target domain corpus. As an example, we treat the OntoNotes corpus as a target domain and pretrain a RoBERTa-like encoder from a cynically selected subset of the Pile. 
On both perplexity and across several downstream tasks in the target domain, it consistently outperforms random selection with 20x less data, 3x fewer training iterations, and 2x less estimated cloud compute cost, validating the recipe of automatic document selection for LM pretraining.", + "author": "Yukun Feng; Patrick Xia; Benjamin Van Durme; Jo\u00e3o Sedoc", + "authorids": "/y/yukun-feng/; /p/patrick-xia/; /b/benjamin-van-durme/; /j/joao-sedoc/", + "bibtex": "@inproceedings{feng-etal-2022-automatic,\n title = \"Automatic Document Selection for Efficient Encoder Pretraining\",\n author = \"Feng, Yukun and\n Xia, Patrick and\n Van Durme, Benjamin and\n Sedoc, Jo{\\~a}o\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.647/\",\n doi = \"10.18653/v1/2022.emnlp-main.647\",\n pages = \"9522--9530\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.647.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.647/", + "pdf_size": 466767, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6148306653848417994&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Johns Hopkins University; Johns Hopkins University; Johns Hopkins University; New York University", + "aff_domain": "jhu.edu;jhu.edu;jhu.edu;stern.nyu.edu", + "email": "jhu.edu;jhu.edu;jhu.edu;stern.nyu.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Johns Hopkins University;New York University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.jhu.edu;https://www.nyu.edu", + "aff_unique_abbr": "JHU;NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.277", + "title": "Automatic Generation of Socratic Subquestions for Teaching Math Word Problems", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Socratic questioning is an educational method that allows students to discover answers to complex problems by asking them a series of thoughtful questions. Generation of didactically sound questions is challenging, requiring understanding of the reasoning process involved in the problem. We hypothesize that such questioning strategy can not only enhance the human performance, but also assist the math word problem (MWP) solvers.In this work, we explore the ability of large language models (LMs) in generating sequential questions for guiding math word problem-solving. We propose various guided question generation schemes based on input conditioning and reinforcement learning.On both automatic and human quality evaluations, we find that LMs constrained with desirable question properties generate superior questions and improve the overall performance of a math word problem solver. We conduct a preliminary user study to examine the potential value of such question generation models in the education domain. Results suggest that the difficulty level of problems plays an important role in determining whether questioning improves or hinders human performance. 
We discuss the future of using such questioning strategies in education.", + "author": "Kumar Shridhar; Jakub Macina; Mennatallah El-Assady; Tanmay Sinha; Manu Kapur; Mrinmaya Sachan", + "authorids": "/k/kumar-shridhar/; /j/jakub-macina/; /m/mennatallah-el-assady/; /t/tanmay-sinha/; /m/manu-kapur/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{shridhar-etal-2022-automatic,\n title = \"Automatic Generation of Socratic Subquestions for Teaching Math Word Problems\",\n author = \"Shridhar, Kumar and\n Macina, Jakub and\n El-Assady, Mennatallah and\n Sinha, Tanmay and\n Kapur, Manu and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.277/\",\n doi = \"10.18653/v1/2022.emnlp-main.277\",\n pages = \"4136--4149\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.277.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.277/", + "pdf_size": 812845, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2746292515800322235&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, ETH Zurich\u2660; ETH AI Center\u03a6; Professorship for Learning Sciences and Higher Education, ETH Zurich\u25bc; Department of Computer Science, ETH Zurich\u2660; Professorship for Learning Sciences and Higher Education, ETH Zurich\u25bc; Department of Computer Science, ETH Zurich\u2660", + "aff_domain": "ethz.ch;ethz.ch; ; ; ; ", + "email": "ethz.ch;ethz.ch; ; ; ; ", + "github": "https://github.com/eth-nlped/scaffolding-generation", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "ETH Zurich", + "aff_unique_dep": "Department of 
Computer Science", + "aff_unique_url": "https://www.ethz.ch", + "aff_unique_abbr": "ETHZ", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Zurich", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.findings-emnlp.3", + "title": "Automatic Rule Induction for Efficient Semi-Supervised Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Semi-supervised learning has shown promise in allowing NLP models to generalize from small amounts of labeled data. Meanwhile, pretrained transformer models act as black-box correlation engines that are difficult to explain and sometimes behave unreliably. In this paper, we propose tackling both of these challenges via Automatic Rule Induction (ARI), a simple and general-purpose framework for the automatic discovery and integration of symbolic rules into pretrained transformer models. First, we extract weak symbolic rules from low-capacity machine learning models trained on small amounts of labeled data. Next, we use an attention mechanism to integrate these rules into high-capacity pretrained transformer models. Last, the rule-augmented system becomes part of a self-training framework to boost supervision signal on unlabeled data. These steps can be layered beneath a variety of existing weak supervision and semi-supervised NLP algorithms in order to improve performance and interpretability. 
Experiments across nine sequence classification and relation extraction tasks suggest that ARI can improve state-of-the-art methods with no manual effort and minimal computational overhead.", + "author": "Reid Pryzant; Ziyi Yang; Yichong Xu; Chenguang Zhu; Michael Zeng", + "authorids": "/r/reid-pryzant/; /z/ziyi-yang/; /y/yichong-xu/; /c/chenguang-zhu/; /m/michael-zeng/", + "bibtex": "@inproceedings{pryzant-etal-2022-automatic,\n title = \"Automatic Rule Induction for Efficient Semi-Supervised Learning\",\n author = \"Pryzant, Reid and\n Yang, Ziyi and\n Xu, Yichong and\n Zhu, Chenguang and\n Zeng, Michael\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.3/\",\n doi = \"10.18653/v1/2022.findings-emnlp.3\",\n pages = \"28--44\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.3.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.3/", + "pdf_size": 654463, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff": "Microsoft Cognitive Services Research Group; Microsoft Cognitive Services Research Group; Microsoft Cognitive Services Research Group; Microsoft Cognitive Services Research Group; Microsoft Cognitive Services Research Group", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Microsoft", + "aff_unique_dep": "Cognitive Services Research Group", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.28", + "title": "Automatic Scene-based Topic Channel Construction System for E-Commerce", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Scene marketing that well demonstrates user interests within a certain scenario has proved effective for offline shopping. To conduct scene marketing for e-commerce platforms, this work presents a novel product form, scene-based topic channel which typically consists of a list of diverse products belonging to the same usage scenario and a topic title that describes the scenario with marketing words. As manual construction of channels is time-consuming due to billions of products as well as dynamic and diverse customers\u2019 interests, it is necessary to leverage AI techniques to automatically construct channels for certain usage scenarios and even discover novel topics. To be specific, we first frame the channel construction task as a two-step problem, i.e., scene-based topic generation and product clustering, and propose an E-commerce Scene-based Topic Channel construction system (i.e., ESTC) to achieve automated production, consisting of scene-based topic generation model for the e-commerce domain, product clustering on the basis of topic similarity, as well as quality control based on automatic model filtering and human screening. Extensive offline experiments and online A/B test validates the effectiveness of such a novel product form as well as the proposed system. 
In addition, we also introduce the experience of deploying the proposed system on a real-world e-commerce recommendation platform.", + "author": "Peng Lin; Yanyan Zou; Lingfei Wu; Mian Ma; Zhuoye Ding; Bo Long", + "authorids": "/p/peng-lin/; /y/yanyan-zou/; /l/lingfei-wu/; /m/mian-ma/; /z/zhuoye-ding/; /b/bo-long/", + "bibtex": "@inproceedings{lin-etal-2022-automatic-scene,\n title = \"Automatic Scene-based Topic Channel Construction System for {E}-Commerce\",\n author = \"Lin, Peng and\n Zou, Yanyan and\n Wu, Lingfei and\n Ma, Mian and\n Ding, Zhuoye and\n Long, Bo\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.28/\",\n doi = \"10.18653/v1/2022.emnlp-industry.28\",\n pages = \"272--284\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.28.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.28/", + "pdf_size": 2744658, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10947982871654906653&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "JD.com, Beijing, China; JD.com, Beijing, China; JD.com, Beijing, China; JD.com, Beijing, China; JD.com, Beijing, China; JD.com, Beijing, China", + "aff_domain": "jd.com;jd.com;jd.com;jd.com;jd.com;jd.com", + "email": "jd.com;jd.com;jd.com;jd.com;jd.com;jd.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "JD.com", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jd.com", + "aff_unique_abbr": "JD", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": 
"2022.findings-emnlp.70", + "title": "Autoregressive Structured Prediction with Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent years have seen a paradigm shift in NLP towards using pretrained language models (PLM) for a wide range of tasks. However, there are many difficult design decisions to represent structures (e.g. tagged text, coreference chains) in a way such that they can be captured by PLMs. Prior work on structured prediction with PLMs typically flattens the structured output into a sequence, which limits the quality of structural information being learned and leads to inferior performance compared to classic discriminative models. In this work, we describe an approach to model structures as sequences of actions in an autoregressive manner with PLMs, allowing in-structure dependencies to be learned without any loss. Our approach achieves the new state-of-the-art on all the structured prediction tasks we looked at, namely, named entity recognition, end-to-end relation extraction, and coreference resolution.", + "author": "Tianyu Liu; Yuchen Eleanor Jiang; Nicholas Monath; Ryan Cotterell; Mrinmaya Sachan", + "authorids": "/t/tianyu-liu/; /y/yuchen-eleanor-jiang/; /n/nicholas-monath/; /r/ryan-cotterell/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{liu-etal-2022-autoregressive,\n title = \"Autoregressive Structured Prediction with Language Models\",\n author = \"Liu, Tianyu and\n Jiang, Yuchen Eleanor and\n Monath, Nicholas and\n Cotterell, Ryan and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.70/\",\n doi = \"10.18653/v1/2022.findings-emnlp.70\",\n pages = 
\"993--1005\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.70.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.70/", + "pdf_size": 332796, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15424591465407748238&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "ETH Z\u00fcrich; ETH Z\u00fcrich; Google Research; ETH Z\u00fcrich; ETH Z\u00fcrich", + "aff_domain": "inf.ethz.ch;inf.ethz.ch;google.com;inf.ethz.ch;inf.ethz.ch", + "email": "inf.ethz.ch;inf.ethz.ch;google.com;inf.ethz.ch;inf.ethz.ch", + "github": "https://github.com/lyutyuh/ASP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "ETH Z\u00fcrich;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.ethz.ch;https://research.google", + "aff_unique_abbr": "ETHZ;Google Research", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.findings-emnlp.53", + "title": "BARLE: Background-Aware Representation Learning for Background Shift Out-of-Distribution Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Machine learning models often suffer from a performance drop when they are applied to out-of-distribution (OOD) samples, i.e., those drawn far away from the training data distribution. Existing OOD detection work mostly focuses on identifying semantic-shift OOD samples, e.g., instances from unseen new classes. However, background-shift OOD detection, which identifies samples with domain or style-change, represents a more practical yet challenging task. In this paper, we propose Background-Aware Representation Learning (BARLE) for background-shift OOD detection in NLP. Specifically, we generate semantics-preserving background-shifted pseudo OOD samples from pretrained masked language models. 
We then contrast the in-distribution (ID) samples with their pseudo OOD counterparts. Unlike prior semantic-shift OOD detection work that often leverages an external text corpus, BARLE only uses ID data, which is more flexible and cost-efficient. In experiments across several text classification tasks, we demonstrate that BARLE is capable of improving background-shift OOD detection performance while maintaining ID classification accuracy. We further investigate the properties of the generated pseudo OOD samples, uncovering the working mechanism of BARLE.", + "author": "Hanyu Duan; Yi Yang; Ahmed Abbasi; Kar Yan Tam", + "authorids": "/h/hanyu-duan/; /y/yi-yang/; /a/ahmed-abbasi/; /k/kar-yan-tam/", + "bibtex": "@inproceedings{duan-etal-2022-barle,\n title = \"{BARLE}: Background-Aware Representation Learning for Background Shift Out-of-Distribution Detection\",\n author = \"Duan, Hanyu and\n Yang, Yi and\n Abbasi, Ahmed and\n Tam, Kar Yan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.53/\",\n doi = \"10.18653/v1/2022.findings-emnlp.53\",\n pages = \"750--764\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.53.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.53/", + "pdf_size": 1931104, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4698212048946951230&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Department of Information Systems, Business Statistics and Operations Management, HKUST; Department of Information Systems, Business Statistics and Operations Management, HKUST; Human-centered Analytics Lab, University of Notre Dame+Department of IT, Analytics, and Operations, 
University of Notre Dame; Department of Information Systems, Business Statistics and Operations Management, HKUST", + "aff_domain": "connect.ust.hk;ust.hk;nd.edu;ust.hk", + "email": "connect.ust.hk;ust.hk;nd.edu;ust.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+1;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;University of Notre Dame", + "aff_unique_dep": "Department of Information Systems, Business Statistics and Operations Management;Human-centered Analytics Lab", + "aff_unique_url": "https://www.hkust.edu.hk;https://www.nd.edu", + "aff_unique_abbr": "HKUST;Notre Dame", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Notre Dame", + "aff_country_unique_index": "0;0;1+1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.259", + "title": "BBTv2: Towards a Gradient-Free Future with Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most downstream adaptation methods tune all or part of the parameters of pre-trained models (PTMs) through gradient descent, where the tuning cost increases linearly with the growth of the model size.By contrast, gradient-free methods only require the forward computation of the PTM to tune the prompt, retaining the benefits of efficient tuning and deployment.Though, past work on gradient-free tuning often introduces gradient descent to seek a good initialization of prompt and lacks versatility across tasks and PTMs.In this paper, we present BBTv2, an improved version of Black-Box Tuning, to drive PTMs for few-shot learning.We prepend continuous prompts to every layer of the PTM and propose a divide-and-conquer gradient-free algorithm to optimize the prompts at different layers alternately.Extensive experiments across various tasks and PTMs show that BBTv2 can achieve comparable performance to full model tuning and state-of-the-art parameter-efficient methods (e.g., Adapter, LoRA, BitFit, 
etc.) under few-shot settings while maintaining much fewer tunable parameters.", + "author": "Tianxiang Sun; Zhengfu He; Hong Qian; Yunhua Zhou; Xuanjing Huang; Xipeng Qiu", + "authorids": "/t/tianxiang-sun/; /z/zhengfu-he/; /h/hong-qian/; /y/yunhua-zhou/; /x/xuan-jing-huang/; /x/xipeng-qiu/", + "bibtex": "@inproceedings{sun-etal-2022-bbtv2,\n title = \"{BBT}v2: Towards a Gradient-Free Future with Large Language Models\",\n author = \"Sun, Tianxiang and\n He, Zhengfu and\n Qian, Hong and\n Zhou, Yunhua and\n Huang, Xuanjing and\n Qiu, Xipeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.259/\",\n doi = \"10.18653/v1/2022.emnlp-main.259\",\n pages = \"3916--3930\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.259.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.259/", + "pdf_size": 1859196, + "gs_citation": 88, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5029498949020662208&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "\u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2660School of Computer Science and Technology, East China Normal University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, 
Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University", + "aff_domain": "fudan.edu.cn;fudan.edu.cn;cs.ecnu.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;cs.ecnu.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;1;0+0;0+0;0+0", + "aff_unique_norm": "Fudan University;East China Normal University", + "aff_unique_dep": "School of Computer Science;School of Computer Science and Technology", + "aff_unique_url": "https://www.fudan.edu.cn;http://www.ecnu.edu.cn", + "aff_unique_abbr": "Fudan;ECNU", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.402", + "title": "BERT Meets CTC: New Formulation of End-to-End Speech Recognition with Pre-trained Masked Language Model", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper presents BERT-CTC, a novel formulation of end-to-end speech recognition that adapts BERT for connectionist temporal classification (CTC). Our formulation relaxes the conditional independence assumptions used in conventional CTC and incorporates linguistic knowledge through the explicit output dependency obtained by BERT contextual embedding. BERT-CTC attends to the full contexts of the input and hypothesized output sequences via the self-attention mechanism. This mechanism encourages a model to learn inner/inter-dependencies between the audio and token representations while maintaining CTC\u2019s training efficiency. During inference, BERT-CTC combines a mask-predict algorithm with CTC decoding, which iteratively refines an output sequence. The experimental results reveal that BERT-CTC improves over conventional approaches across variations in speaking styles and languages. 
Finally, we show that the semantic representations in BERT-CTC are beneficial towards downstream spoken language understanding tasks.", + "author": "Yosuke Higuchi; Brian Yan; Siddhant Arora; Tetsuji Ogawa; Tetsunori Kobayashi; Shinji Watanabe", + "authorids": "/y/yosuke-higuchi/; /b/brian-yan/; /s/siddhant-arora/; /t/tetsuji-ogawa/; /t/tetsunori-kobayashi/; /s/shinji-watanabe/", + "bibtex": "@inproceedings{higuchi-etal-2022-bert,\n title = \"{BERT} Meets {CTC}: New Formulation of End-to-End Speech Recognition with Pre-trained Masked Language Model\",\n author = \"Higuchi, Yosuke and\n Yan, Brian and\n Arora, Siddhant and\n Ogawa, Tetsuji and\n Kobayashi, Tetsunori and\n Watanabe, Shinji\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.402/\",\n doi = \"10.18653/v1/2022.findings-emnlp.402\",\n pages = \"5486--5503\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.402.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.402/", + "pdf_size": 678955, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=714095209248838649&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Carnegie Mellon University+Waseda University; Carnegie Mellon University; Carnegie Mellon University; Waseda University; Waseda University; Carnegie Mellon University", + "aff_domain": "pcl.cs.waseda.ac.jp; ; ; ; ; ", + "email": "pcl.cs.waseda.ac.jp; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;1;1;0", + "aff_unique_norm": "Carnegie Mellon University;Waseda University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cmu.edu;https://www.waseda.jp/top", + 
"aff_unique_abbr": "CMU;Waseda", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;1;1;0", + "aff_country_unique": "United States;Japan" + }, + { + "id": "2022.emnlp-main.407", + "title": "BERT in Plutarch\u2019s Shadows", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The extensive surviving corpus of the ancient scholar Plutarch of Chaeronea (ca. 45-120 CE) also contains several texts which, according to current scholarly opinion, did not originate with him and are therefore attributed to an anonymous author Pseudo-Plutarch. These include, in particular, the work Placita Philosophorum (Quotations and Opinions of the Ancient Philosophers), which is extremely important for the history of ancient philosophy. Little is known about the identity of that anonymous author and its relation to other authors from the same period. This paper presents a BERT language model for Ancient Greek. The model discovers previously unknown statistical properties relevant to these literary, philosophical, and historical problems and can shed new light on this authorship question. In particular, the Placita Philosophorum, together with one of the other Pseudo-Plutarch texts, shows similarities with the texts written by authors from an Alexandrian context (2nd/3rd century CE).", + "author": "Ivan P. Yamshchikov; Alexey Tikhonov; Yorgos Pantis; Charlotte Schubert; J\u00fcrgen Jost", + "authorids": "/i/ivan-p-yamshchikov/; /a/alexey-tikhonov/; /y/yorgos-pantis/; /c/charlotte-schubert/; /j/jurgen-jost/", + "bibtex": "@inproceedings{yamshchikov-etal-2022-bert,\n title = \"{BERT} in Plutarch`s Shadows\",\n author = {Yamshchikov, Ivan P. 
and\n Tikhonov, Alexey and\n Pantis, Yorgos and\n Schubert, Charlotte and\n Jost, J{\\\"u}rgen},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.407/\",\n doi = \"10.18653/v1/2022.emnlp-main.407\",\n pages = \"6071--6080\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.407.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.407/", + "pdf_size": 1638984, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5938874642744286458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany + CEMAPRE, University of Lisbon, Portugal; Inworld.AI, Berlin, Germany; Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany; University of Leipzig, Leipzig, Germany; Max Planck Institute for Mathematics in the Sciences, Leipzig, Germany", + "aff_domain": "yamshchikov.info;gmail.com; ; ; ", + "email": "yamshchikov.info;gmail.com; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;0;3;0", + "aff_unique_norm": "Max Planck Institute for Mathematics in the Sciences;University of Lisbon;Inworld.AI;University of Leipzig", + "aff_unique_dep": ";CEMAPRE;;", + "aff_unique_url": "https://www.mis.mpg.de;https://www.ulisboa.pt;;https://www.uni-leipzig.de", + "aff_unique_abbr": "MPI MIS;;;ULE", + "aff_campus_unique_index": "0;2;0;0;0", + "aff_campus_unique": "Leipzig;;Berlin", + "aff_country_unique_index": "0+1;0;0;0;0", + "aff_country_unique": "Germany;Portugal" + }, + { + "id": "2022.emnlp-main.245", + "title": "BERTScore is Unfair: On Social Bias in Language Model-Based Metrics for Text Generation", 
+ "track": "main", + "status": "Main", + "award": false, + "abstract": "Automatic evaluation metrics are crucial to the development of generative systems. In recent years, pre-trained language model (PLM) based metrics, such as BERTScore, have been commonly adopted in various generation tasks. However, it has been demonstrated that PLMs encode a range of stereotypical societal biases, leading to a concern about the fairness of PLMs as metrics. To that end, this work presents the first systematic study on the social bias in PLM-based metrics. We demonstrate that popular PLM-based metrics exhibit significantly higher social bias than traditional metrics on 6 sensitive attributes, namely race, gender, religion, physical appearance, age, and socioeconomic status. In-depth analysis suggests that choosing paradigms (matching, regression, or generation) of the metric has a greater impact on fairness than choosing PLMs. In addition, we develop debiasing adapters that are injected into PLM layers, mitigating bias in PLM-based metrics while retaining high performance for evaluating text generation.", + "author": "Tianxiang Sun; Junliang He; Xipeng Qiu; Xuanjing Huang", + "authorids": "/t/tianxiang-sun/; /j/junliang-he/; /x/xipeng-qiu/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{sun-etal-2022-bertscore,\n title = \"{BERTS}core is Unfair: On Social Bias in Language Model-Based Metrics for Text Generation\",\n author = \"Sun, Tianxiang and\n He, Junliang and\n Qiu, Xipeng and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.245/\",\n doi = \"10.18653/v1/2022.emnlp-main.245\",\n pages = \"3726--3739\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.245.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.245/", + "pdf_size": 1461375, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12132102192387225588&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "\u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; \u2662School of Computer Science, Fudan University + \u2661Shanghai Key Laboratory of Intelligent Information Processing, Fudan University", + "aff_domain": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.488", + "title": "BOOKSUM: A Collection of Datasets for Long-form Narrative Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The majority of existing text summarization datasets include short-form source documents that lack long-range causal and temporal dependencies, and often contain strong layout and stylistic biases. While relevant, such datasets will offer limited challenges for future text summarization systems. 
We address these issues by introducing BOOKSUM, a collection of datasets for long-form narrative summarization. Our dataset covers documents from the literature domain, such as novels, plays and stories, and includes highly abstractive, human written summaries on three levels of granularity of increasing difficulty: paragraph-, chapter-, and book-level. The domain and structure of our dataset poses a unique set of challenges for summarization systems, which include: processing very long documents, non-trivial causal and temporal dependencies, and rich discourse structures. To facilitate future work, we trained and evaluated multiple extractive and abstractive summarization models as baselines for our dataset.", + "author": "Wojciech Kryscinski; Nazneen Rajani; Divyansh Agarwal; Caiming Xiong; Dragomir Radev", + "authorids": "/w/wojciech-kryscinski/; /n/nazneen-rajani/; /d/divyansh-agarwal/; /c/caiming-xiong/; /d/dragomir-radev/", + "bibtex": "@inproceedings{kryscinski-etal-2022-booksum,\n title = \"{BOOKSUM}: A Collection of Datasets for Long-form Narrative Summarization\",\n author = \"Kryscinski, Wojciech and\n Rajani, Nazneen and\n Agarwal, Divyansh and\n Xiong, Caiming and\n Radev, Dragomir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.488/\",\n doi = \"10.18653/v1/2022.findings-emnlp.488\",\n pages = \"6536--6558\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.488.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.488/", + "pdf_size": 1162454, + "gs_citation": 151, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3357307361304862454&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Salesforce 
Research; Huggingface; Salesforce Research; Salesforce Research; Yale University", + "aff_domain": "salesforce.com;huggingface.co;salesforce.com;salesforce.com;yale.edu", + "email": "salesforce.com;huggingface.co;salesforce.com;salesforce.com;yale.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;2", + "aff_unique_norm": "Salesforce;Huggingface;Yale University", + "aff_unique_dep": "Salesforce Research;;", + "aff_unique_url": "https://research.salesforce.com;https://huggingface.co;https://www.yale.edu", + "aff_unique_abbr": "Salesforce;Huggingface;Yale", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.177", + "title": "Back to the Future: Bidirectional Information Decoupling Network for Multi-turn Dialogue Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-turn dialogue modeling as a challenging branch of natural language understanding (NLU), aims to build representations for machines to understand human dialogues, which provides a solid foundation for multiple downstream tasks. Recent studies of dialogue modeling commonly employ pre-trained language models (PrLMs) to encode the dialogue history as successive tokens, which is insufficient in capturing the temporal characteristics of dialogues. Therefore, we propose Bidirectional Information Decoupling Network (BiDeN) as a universal dialogue encoder, which explicitly incorporates both the past and future contexts and can be generalized to a wide range of dialogue-related tasks. 
Experimental results on datasets of different downstream tasks demonstrate the universality and effectiveness of our BiDeN.", + "author": "Yiyang Li; Hai Zhao; Zhuosheng Zhang", + "authorids": "/y/yiyang-li/; /h/hai-zhao/; /z/zhuosheng-zhang/", + "bibtex": "@inproceedings{li-etal-2022-back,\n title = \"Back to the Future: Bidirectional Information Decoupling Network for Multi-turn Dialogue Modeling\",\n author = \"Li, Yiyang and\n Zhao, Hai and\n Zhang, Zhuosheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.177/\",\n doi = \"10.18653/v1/2022.emnlp-main.177\",\n pages = \"2761--2774\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.177.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.177/", + "pdf_size": 692875, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12937284256176758683&as_sdt=5,30&sciodt=0,30&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;cs.sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;cs.sjtu.edu.cn;sjtu.edu.cn", + 
"github": "https://github.com/EricLee8/BiDeN", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0+0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.6", + "title": "Backdoor Attacks in Federated Learning by Rare Embeddings and Gradient Ensembling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent advances in federated learning have demonstrated its promising capability to learn on decentralized datasets. However, a considerable amount of work has raised concerns due to the potential risks of adversaries participating in the framework to poison the global model for an adversarial purpose. This paper investigates the feasibility of model poisoning for backdoor attacks through rare word embeddings of NLP models. In text classification, less than 1% of adversary clients suffices to manipulate the model output without any drop in the performance of clean sentences. For a less complex dataset, a mere 0.1% of adversary clients is enough to poison the global model effectively. 
We also propose a technique specialized in the federated learning scheme called gradient ensemble, which enhances the backdoor performance in all experimental settings.", + "author": "Ki Yoon Yoo; Nojun Kwak", + "authorids": "/k/ki-yoon-yoo/; /n/nojun-kwak/", + "bibtex": "@inproceedings{yoo-kwak-2022-backdoor,\n title = \"Backdoor Attacks in Federated Learning by Rare Embeddings and Gradient Ensembling\",\n author = \"Yoo, Ki Yoon and\n Kwak, Nojun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.6/\",\n doi = \"10.18653/v1/2022.emnlp-main.6\",\n pages = \"72--88\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.6.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.6/", + "pdf_size": 685879, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4469548018708854377&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Department of Intelligence and Information, Graduate School of Convergence Science and Technology, Seoul National University; Department of Intelligence and Information, Graduate School of Convergence Science and Technology, Seoul National University", + "aff_domain": "snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;snu.ac.kr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Seoul National University", + "aff_unique_dep": "Department of Intelligence and Information, Graduate School of Convergence Science and Technology", + "aff_unique_url": "https://www.snu.ac.kr", + "aff_unique_abbr": "SNU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0", + "aff_country_unique": 
"South Korea" + }, + { + "id": "2022.findings-emnlp.397", + "title": "Baked-in State Probing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural language models have been analyzed for their linguistic and extra-linguistic knowledge via probing. Of particular interest has been the following question: how much can a language model trained only on form learn about meaning? Recent work has demonstrated via probing classifiers that in the setting of simple procedural text, where by \u201cmeaning\u201d we mean the underlying world state, language models have a non-trivial performance on world state tracking. However, our proposed evaluation based on model predictions shows differing results, suggesting that these models are either not capturing the world state or not using it. How do these results change if the model has access to the world state? We explore this alternate setting with access to the underlying world state only during training and investigate ways of \u201cbaking in\u201d the state knowledge along with the primary task of language modeling. Our proposed approaches allow for state probing during inference simply via text prompts, avoiding any probing classifier machinery. 
In terms of performance, we show that baking in the state knowledge during training leads to significant improvements in state tracking performance and text generation quality,", + "author": "Shubham Toshniwal; Sam Wiseman; Karen Livescu; Kevin Gimpel", + "authorids": "/s/shubham-toshniwal/; /s/sam-wiseman/; /k/karen-livescu/; /k/kevin-gimpel/", + "bibtex": "@inproceedings{toshniwal-etal-2022-baked,\n title = \"Baked-in State Probing\",\n author = \"Toshniwal, Shubham and\n Wiseman, Sam and\n Livescu, Karen and\n Gimpel, Kevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.397/\",\n doi = \"10.18653/v1/2022.findings-emnlp.397\",\n pages = \"5430--5435\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.397.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.397/", + "pdf_size": 283910, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:jZv0SeFelPcJ:scholar.google.com/&scioq=Baked-in+State+Probing&hl=en&as_sdt=0,33", + "gs_version_total": 0, + "aff": "Meta AI; Duke University; Toyota Technological Institute at Chicago; Toyota Technological Institute at Chicago", + "aff_domain": "meta.com;cs.duke.edu;ttic.edu;ttic.edu", + "email": "meta.com;cs.duke.edu;ttic.edu;ttic.edu", + "github": "https://github.com/facebookresearch/state_probing_lm", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2", + "aff_unique_norm": "Meta Platforms, Inc.;Duke University;Toyota Technological Institute at Chicago", + "aff_unique_dep": "Meta AI;;", + "aff_unique_url": "https://meta.com;https://www.duke.edu;https://www.tti-chicago.org", + "aff_unique_abbr": "Meta;Duke;TTI Chicago", + "aff_campus_unique_index": 
"1;1", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.40", + "title": "Balanced Adversarial Training: Balancing Tradeoffs between Fickleness and Obstinacy in NLP Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Traditional (fickle) adversarial examples involve finding a small perturbation that does not change an input\u2019s true label but confuses the classifier into outputting a different prediction. Conversely, obstinate adversarial examples occur when an adversary finds a small perturbation that preserves the classifier\u2019s prediction but changes the true label of an input.Adversarial training and certified robust training have shown some effectiveness in improving the robustness of machine learnt models to fickle adversarial examples. We show that standard adversarial training methods focused on reducing vulnerability to fickle adversarial examples may make a model more vulnerable to obstinate adversarial examples, with experiments for both natural language inference and paraphrase identification tasks. 
To counter this phenomenon, we introduce Balanced Adversarial Training, which incorporates contrastive learning to increase robustness against both fickle and obstinate adversarial examples.", + "author": "Hannah Chen; Yangfeng Ji; David Evans", + "authorids": "/h/hannah-chen/; /y/yangfeng-ji/; /d/david-k-evans/", + "bibtex": "@inproceedings{chen-etal-2022-balanced,\n title = \"Balanced Adversarial Training: Balancing Tradeoffs between Fickleness and Obstinacy in {NLP} Models\",\n author = \"Chen, Hannah and\n Ji, Yangfeng and\n Evans, David\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.40/\",\n doi = \"10.18653/v1/2022.emnlp-main.40\",\n pages = \"632--647\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.40.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.40/", + "pdf_size": 978236, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9650500191590459697&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, University of Virginia; Department of Computer Science, University of Virginia; Department of Computer Science, University of Virginia", + "aff_domain": "virginia.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + 
{ + "id": "2022.emnlp-main.779", + "title": "Balancing out Bias: Achieving Fairness Through Balanced Training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Group bias in natural language processing tasks manifests as disparities in system error rates across texts authored by different demographic groups, typically disadvantaging minority groups. Dataset balancing has been shown to be effective at mitigating bias, however existing approaches do not directly account for correlations between author demographics and linguistic variables, limiting their effectiveness. To achieve Equal Opportunity fairness, such as equal job opportunity without regard to demographics, this paper introduces a simple, but highly effective, objective for countering bias using balanced training.We extend the method in the form of a gated model, which incorporates protected attributes as input, and show that it is effective at reducing bias in predictions through demographic input perturbation, outperforming all other bias mitigation techniques when combined with balanced training.", + "author": "Xudong Han; Timothy Baldwin; Trevor Cohn", + "authorids": "/x/xudong-han/; /t/timothy-baldwin/; /t/trevor-cohn/", + "bibtex": "@inproceedings{han-etal-2022-balancing,\n title = \"Balancing out Bias: Achieving Fairness Through Balanced Training\",\n author = \"Han, Xudong and\n Baldwin, Timothy and\n Cohn, Trevor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.779/\",\n doi = \"10.18653/v1/2022.emnlp-main.779\",\n pages = \"11335--11350\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.779.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.779/", + "pdf_size": 1759258, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6779724396052382358&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "The University of Melbourne; The University of Melbourne+MBZUAI; The University of Melbourne", + "aff_domain": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "email": "student.unimelb.edu.au;unimelb.edu.au;unimelb.edu.au", + "github": "https://github.com/HanXudong/Achieving_Fairness_Through_Balanced_Training", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0", + "aff_unique_norm": "University of Melbourne;Mohamed Bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.unimelb.edu.au;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "UniMelb;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "Australia;United Arab Emirates" + }, + { + "id": "2022.findings-emnlp.186", + "title": "BanglaRQA: A Benchmark Dataset for Under-resourced Bangla Language Reading Comprehension-based Question Answering with Diverse Question-Answer Types", + "track": "main", + "status": "finding", + "award": false, + "abstract": "High-resource languages, such as English, have access to a plethora of datasets with various question-answer types resembling real-world reading comprehension. However, there is a severe lack of diverse and comprehensive question-answering datasets in under-resourced languages like Bangla. The ones available are either translated versions of English datasets with a niche answer format or created by human annotations focusing on a specific domain, question type, or answer type. To address these limitations, this paper introduces BanglaRQA, a reading comprehension-based Bangla question-answering dataset with various question-answer types. 
BanglaRQA consists of 3,000 context passages and 14,889 question-answer pairs created from those passages. The dataset comprises answerable and unanswerable questions covering four unique categories of questions and three types of answers. In addition, this paper also implemented four different Transformer models for question-answering on the proposed dataset. The best-performing model achieved an overall 62.42% EM and 78.11% F1 score. However, detailed analyses showed that the performance varies across question-answer types, leaving room for substantial improvement of the model performance. Furthermore, we demonstrated the effectiveness of BanglaRQA as a training resource by showing strong results on the bn_squad dataset. Therefore, BanglaRQA has the potential to contribute to the advancement of future research by enhancing the capability of language models. The dataset and codes are available at https://github.com/sartajekram419/BanglaRQA", + "author": "Syed Mohammed Sartaj Ekram; Adham Arik Rahman; Md. Sajid Altaf; Mohammed Saidul Islam; Mehrab Mustafy Rahman; Md Mezbaur Rahman; Md Azam Hossain; Abu Raihan Mostofa Kamal", + "authorids": "/s/syed-mohammed-sartaj-ekram/; /a/adham-arik-rahman/; /m/md-sajid-altaf/; /m/mohammed-saidul-islam/; /m/mehrab-mustafy-rahman/; /m/md-mezbaur-rahman/; /m/md-azam-hossain/; /a/abu-raihan-mostofa-kamal/", + "bibtex": "@inproceedings{ekram-etal-2022-banglarqa,\n title = \"{B}angla{RQA}: A Benchmark Dataset for Under-resourced {B}angla Language Reading Comprehension-based Question Answering with Diverse Question-Answer Types\",\n author = \"Ekram, Syed Mohammed Sartaj and\n Rahman, Adham Arik and\n Altaf, Md. 
Sajid and\n Islam, Mohammed Saidul and\n Rahman, Mehrab Mustafy and\n Rahman, Md Mezbaur and\n Hossain, Md Azam and\n Kamal, Abu Raihan Mostofa\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.186/\",\n doi = \"10.18653/v1/2022.findings-emnlp.186\",\n pages = \"2518--2532\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.186.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.186/", + "pdf_size": 2919409, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7135308720618522251&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/sartajekram419/BanglaRQA", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.224", + "title": "Benchmarking Language Models for Code Syntax Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained language models have demonstrated impressive performance in both natural language processing and program understanding, which represent the input as a token sequence without explicitly modeling its structure. Some prior works show that pre-trained language models can capture the syntactic rules of natural languages without finetuning on syntax understanding tasks. However, there is limited understanding of how well pre-trained models understand the code structure so far. In this work, we perform the first thorough benchmarking of the state-of-the-art pre-trained models for identifying the syntactic structures of programs. 
Specifically, we introduce CodeSyntax, a large-scale dataset of programs annotated with the syntactic relationships in their corresponding abstract syntax trees. Our key observation is that pre-training on massive code data does not result in decent code syntax understanding. In fact, these pre-trained programming language models fail to match the performance of naive baselines based on positional offsets and keywords. We also present a natural language benchmark to highlight the differences between natural languages and programming languages in terms of understanding corresponding syntactic structures. Our findings point out key limitations of existing pre-training methods and suggest the importance of modeling syntactic structures for the programming language.", + "author": "Da Shen; Xinyun Chen; Chenguang Wang; Koushik Sen; Dawn Song", + "authorids": "/d/da-shen/; /x/xinyun-chen/; /c/chenguang-wang/; /k/koushik-sen/; /d/dawn-song/", + "bibtex": "@inproceedings{shen-etal-2022-benchmarking,\n title = \"Benchmarking Language Models for Code Syntax Understanding\",\n author = \"Shen, Da and\n Chen, Xinyun and\n Wang, Chenguang and\n Sen, Koushik and\n Song, Dawn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.224/\",\n doi = \"10.18653/v1/2022.findings-emnlp.224\",\n pages = \"3071--3093\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.224.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.224/", + "pdf_size": 685745, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15953686425342018501&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Maryland, College Park; Google 
Research, Brain Team; Washington University in St. Louis; University of California, Berkeley; University of California, Berkeley", + "aff_domain": "terpmail.umd.edu;google.com;wustl.edu;cs.berkeley.edu;cs.berkeley.edu", + "email": "terpmail.umd.edu;google.com;wustl.edu;cs.berkeley.edu;cs.berkeley.edu", + "github": "https://github.com/dashends/CodeSyntax", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;3", + "aff_unique_norm": "University of Maryland;Google;Washington University in St. Louis;University of California, Berkeley", + "aff_unique_dep": ";Google Research;;", + "aff_unique_url": "https://www.umd.edu;https://research.google;https://wustl.edu;https://www.berkeley.edu", + "aff_unique_abbr": "UMD;Google;WashU;UC Berkeley", + "aff_campus_unique_index": "0;1;2;3;3", + "aff_campus_unique": "College Park;Mountain View;St. Louis;Berkeley", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.415", + "title": "Bernice: A Multilingual Pre-trained Encoder for Twitter", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The language of Twitter differs significantly from that of other domains commonly included in large language model training. While tweets are typically multilingual and contain informal language, including emoji and hashtags, most pre-trained language models for Twitter are either monolingual, adapted from other domains rather than trained exclusively on Twitter, or are trained on a limited amount of in-domain Twitter data.We introduce Bernice, the first multilingual RoBERTa language model trained from scratch on 2.5 billion tweets with a custom tweet-focused tokenizer. 
We evaluate on a variety of monolingual and multilingual Twitter benchmarks, finding that our model consistently exceeds or matches the performance of a variety of models adapted to social media data as well as strong multilingual baselines, despite being trained on less data overall.We posit that it is more efficient compute- and data-wise to train completely on in-domain data with a specialized domain-specific tokenizer.", + "author": "Alexandra DeLucia; Shijie Wu; Aaron Mueller; Carlos Aguirre; Philip Resnik; Mark Dredze", + "authorids": "/a/alexandra-delucia/; /s/shijie-wu/; /a/aaron-mueller/; /c/carlos-aguirre/; /p/philip-resnik/; /m/mark-dredze/", + "bibtex": "@inproceedings{delucia-etal-2022-bernice,\n title = \"Bernice: A Multilingual Pre-trained Encoder for {T}witter\",\n author = \"DeLucia, Alexandra and\n Wu, Shijie and\n Mueller, Aaron and\n Aguirre, Carlos and\n Resnik, Philip and\n Dredze, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.415/\",\n doi = \"10.18653/v1/2022.emnlp-main.415\",\n pages = \"6191--6205\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.415.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.415/", + "pdf_size": 498652, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7937761037562902011&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "Center for Language and Speech Processing, Johns Hopkins University; Center for Language and Speech Processing, Johns Hopkins University; Center for Language and Speech Processing, Johns Hopkins University; Center for Language and Speech Processing, Johns Hopkins University; Center for Language and Speech Processing, 
Johns Hopkins University; Linguistics/UMIACS, University of Maryland", + "aff_domain": "jhu.edu;jhu.edu;jhu.edu;jhu.edu;jhu.edu;umd.edu", + "email": "jhu.edu;jhu.edu;jhu.edu;jhu.edu;jhu.edu;umd.edu", + "github": "https://github.com/JHU-CLSP/Bernice-Twitter-encoder", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Johns Hopkins University;University of Maryland", + "aff_unique_dep": "Center for Language and Speech Processing;Linguistics/UMIACS", + "aff_unique_url": "https://www.jhu.edu;https://www.umd.edu", + "aff_unique_abbr": "JHU;UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.471", + "title": "Better Few-Shot Relation Extraction with Label Prompt Dropout", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few-shot relation extraction aims to learn to identify the relation between two entities based on very limited training examples. Recent efforts found that textual labels (i.e., relation names and relation descriptions) could be extremely useful for learning class representations, which will benefit the few-shot learning task. However, what is the best way to leverage such label information in the learning process is an important research question. Existing works largely assume such textual labels are always present during both learning and prediction. In this work, we argue that such approaches may not always lead to optimal results. Instead, we present a novel approach called label prompt dropout, which randomly removes label descriptions in the learning process. 
Our experiments show that our approach is able to lead to improved class representations, yielding significantly better results on the few-shot relation extraction task.", + "author": "Peiyuan Zhang; Wei Lu", + "authorids": "/p/peiyuan-zhang/; /w/wei-lu/", + "bibtex": "@inproceedings{zhang-lu-2022-better,\n title = \"Better Few-Shot Relation Extraction with Label Prompt Dropout\",\n author = \"Zhang, Peiyuan and\n Lu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.471/\",\n doi = \"10.18653/v1/2022.emnlp-main.471\",\n pages = \"6996--7006\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.471.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.471/", + "pdf_size": 2902220, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13362523567553021184&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 3, + "aff": "StatNLP Research Group, Singapore University of Technology and Design; StatNLP Research Group, Singapore University of Technology and Design", + "aff_domain": "sutd.edu.sg;sutd.edu.sg", + "email": "sutd.edu.sg;sutd.edu.sg", + "github": "https://github.com/jzhang38/LPD", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Singapore University of Technology and Design", + "aff_unique_dep": "StatNLP Research Group", + "aff_unique_url": "https://www.sutd.edu.sg", + "aff_unique_abbr": "SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.emnlp-main.575", + "title": "Better Hit the Nail on the Head than Beat around the Bush: Removing Protected 
Attributes with a Single Projection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Bias elimination and recent probing studies attempt to remove specific information from embedding spaces. Here it is important to remove as much of the target information as possible, while preserving any other information present. INLP is a popular recent method which removes specific information through iterative nullspace projections.Multiple iterations, however, increase the risk that information other than the target is negatively affected.We introduce two methods that find a single targeted projection: Mean Projection (MP, more efficient) and Tukey Median Projection (TMP, with theoretical guarantees). Our comparison between MP and INLP shows that (1) one MP projection removes linear separability based on the target and (2) MP has less impact on the overall space.Further analysis shows that applying random projections after MP leads to the same overall effects on the embedding space as the multiple projections of INLP. 
Applying one targeted (MP) projection hence is methodologically cleaner than applying multiple (INLP) projections that introduce random effects.", + "author": "Pantea Haghighatkhah; Antske Fokkens; Pia Sommerauer; Bettina Speckmann; Kevin Verbeek", + "authorids": "/p/pantea-haghighatkhah/; /a/antske-fokkens/; /p/pia-sommerauer/; /b/bettina-speckmann/; /k/kevin-verbeek/", + "bibtex": "@inproceedings{haghighatkhah-etal-2022-better,\n title = \"Better Hit the Nail on the Head than Beat around the Bush: Removing Protected Attributes with a Single Projection\",\n author = \"Haghighatkhah, Pantea and\n Fokkens, Antske and\n Sommerauer, Pia and\n Speckmann, Bettina and\n Verbeek, Kevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.575/\",\n doi = \"10.18653/v1/2022.emnlp-main.575\",\n pages = \"8395--8416\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.575.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.575/", + "pdf_size": 1121530, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15887045709463483500&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "TU Eindhoven, Department of Mathematics and Computer Science\u2662; Vrije Universiteit Amsterdam, Computational Linguistics Text Mining Lab\u2663; TU Eindhoven, Department of Mathematics and Computer Science\u2662; TU Eindhoven, Department of Mathematics and Computer Science\u2662; TU Eindhoven, Department of Mathematics and Computer Science\u2662", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Eindhoven University of 
Technology;Vrije Universiteit Amsterdam", + "aff_unique_dep": "Department of Mathematics and Computer Science;Computational Linguistics Text Mining Lab", + "aff_unique_url": "https://www.tue.nl;https://www.vu.nl", + "aff_unique_abbr": "TU/e;VU Amsterdam", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Eindhoven;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.findings-emnlp.344", + "title": "Beyond Additive Fusion: Learning Non-Additive Multimodal Interactions", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multimodal fusion addresses the problem of analyzing spoken words in the multimodal context, including visual expressions and prosodic cues. Even when multimodal models lead to performance improvements, it is often unclear whether bimodal and trimodal interactions are learned or whether modalities are processed independently of each other. We propose Multimodal Residual Optimization (MRO) to separate unimodal, bimodal, and trimodal interactions in a multimodal model. This improves interpretability as the multimodal interaction can be quantified. Inspired by Occam\u2019s razor, the main intuition of MRO is that (simpler) unimodal contributions should be learned before learning (more complex) bimodal and trimodal interactions. For example, bimodal predictions should learn to correct the mistakes (residuals) of unimodal predictions, thereby letting the bimodal predictions focus on the remaining bimodal interactions. Empirically, we observe that MRO successfully separates unimodal, bimodal, and trimodal interactions while not degrading predictive performance. 
We complement our empirical results with a human perception study and observe that MRO learns multimodal interactions that align with human judgments.", + "author": "Torsten W\u00f6rtwein; Lisa Sheeber; Nicholas Allen; Jeffrey Cohn; Louis-Philippe Morency", + "authorids": "/t/torsten-wortwein/; /l/lisa-sheeber/; /n/nicholas-allen/; /j/jeffrey-cohn/; /l/louis-philippe-morency/", + "bibtex": "@inproceedings{wortwein-etal-2022-beyond,\n title = \"Beyond Additive Fusion: Learning Non-Additive Multimodal Interactions\",\n author = {W{\\\"o}rtwein, Torsten and\n Sheeber, Lisa and\n Allen, Nicholas and\n Cohn, Jeffrey and\n Morency, Louis-Philippe},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.344/\",\n doi = \"10.18653/v1/2022.findings-emnlp.344\",\n pages = \"4681--4696\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.344.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.344/", + "pdf_size": 376783, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12599365337283887245&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Language Technologies Institute, Carnegie Mellon University; Oregon Research Institute; Department of Psychology, University of Oregon; Department of Psychology, University of Pittsburgh; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;ori.org;uoregon.edu;pitt.edu;cs.cmu.edu", + "email": "cs.cmu.edu;ori.org;uoregon.edu;pitt.edu;cs.cmu.edu", + "github": "https://github.com/twoertwein/MultimodalResidualOptimization", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Carnegie Mellon 
University;Oregon Research Institute;University of Oregon;University of Pittsburgh", + "aff_unique_dep": "Language Technologies Institute;;Department of Psychology;Department of Psychology", + "aff_unique_url": "https://www.cmu.edu;https://www.ori.org;https://www.uoregon.edu;https://www.pitt.edu", + "aff_unique_abbr": "CMU;;UO;Pitt", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Pittsburgh;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.273", + "title": "Beyond Counting Datasets: A Survey of Multilingual Dataset Construction and Necessary Resources", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While the NLP community is generally aware of resource disparities among languages, we lack research that quantifies the extent and types of such disparity. Prior surveys estimating the availability of resources based on the number of datasets can be misleading as dataset quality varies: many datasets are automatically induced or translated from English data. To provide a more comprehensive picture of language resources, we examine the characteristics of 156 publicly available NLP datasets. We manually annotate how they are created, including input text and label sources and tools used to build them, and what they study, tasks they address and motivations for their creation. After quantifying the qualitative NLP resource gap across languages, we discuss how to improve data collection in low-resource languages. We survey language-proficient NLP researchers and crowd workers per language, finding that their estimated availability correlates with dataset availability. Through crowdsourcing experiments, we identify strategies for collecting high-quality multilingual data on the Mechanical Turk platform. 
We conclude by making macro and micro-level suggestions to the NLP community and individual researchers for future multilingual data development.", + "author": "Xinyan Yu; Trina Chatterjee; Akari Asai; Junjie Hu; Eunsol Choi", + "authorids": "/x/xinyan-yu/; /t/trina-chatterjee/; /a/akari-asai/; /j/junjie-hu/; /e/eunsol-choi/", + "bibtex": "@inproceedings{yu-etal-2022-beyond,\n title = \"Beyond Counting Datasets: A Survey of Multilingual Dataset Construction and Necessary Resources\",\n author = \"Yu, Xinyan and\n Chatterjee, Trina and\n Asai, Akari and\n Hu, Junjie and\n Choi, Eunsol\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.273/\",\n doi = \"10.18653/v1/2022.findings-emnlp.273\",\n pages = \"3725--3743\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.273.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.273/", + "pdf_size": 1222617, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5345576560241937997&as_sdt=10005&sciodt=0,8&hl=en", + "gs_version_total": 5, + "aff": "University of Washington; University of Washington; The University of Texas at Austin; The University of Wisconsin-Madison; The University of Texas at Austin", + "aff_domain": "cs.washington.edu;cs.washington.edu;utexas.edu;wisc.edu;utexas.edu", + "email": "cs.washington.edu;cs.washington.edu;utexas.edu;wisc.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;1", + "aff_unique_norm": "University of Washington;University of Texas at Austin;University of Wisconsin-Madison", + "aff_unique_dep": ";;", + "aff_unique_url": 
"https://www.washington.edu;https://www.utexas.edu;https://www.wisc.edu", + "aff_unique_abbr": "UW;UT Austin;UW-Madison", + "aff_campus_unique_index": "1;2;1", + "aff_campus_unique": ";Austin;Madison", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.100", + "title": "Beyond Model Interpretability: On the Faithfulness and Adversarial Robustness of Contrastive Textual Explanations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Contrastive explanation methods go beyond transparency and address the contrastive aspect of explanations. Such explanations are emerging as an attractive option to provide actionable change to scenarios adversely impacted by classifiers\u2019 decisions. However, their extension to textual data is under-explored and there is little investigation on their vulnerabilities and limitations. This work motivates textual counterfactuals by highlighting the social limitations of non-contrastive explainability. We also lay the ground for a novel evaluation scheme inspired by the faithfulness of explanations. Accordingly, we extend the computation of three metrics, proximity, connectedness and stability, to textual data and we benchmark two successful contrastive methods, POLYJUICE and MiCE, on our suggested metrics. Experiments on sentiment analysis data show that the connectedness of counterfactuals to their original counterparts is not obvious in both models. More interestingly, the generated contrastive texts are more attainable with POLYJUICE which highlights the significance of latent representations in counterfactual search. Finally, we perform the first semantic adversarial attack on textual recourse methods. 
The results demonstrate the robustness of POLYJUICE and the role that latent input representations play in robustness and reliability.", + "author": "Julia El Zini; Mariette Awad", + "authorids": "/j/julia-el-zini/; /m/mariette-awad/", + "bibtex": "@inproceedings{el-zini-awad-2022-beyond,\n title = \"Beyond Model Interpretability: On the Faithfulness and Adversarial Robustness of Contrastive Textual Explanations\",\n author = \"El Zini, Julia and\n Awad, Mariette\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.100/\",\n doi = \"10.18653/v1/2022.findings-emnlp.100\",\n pages = \"1391--1402\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.100.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.100/", + "pdf_size": 692660, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5393094352754360571&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 3, + "aff": "Electrical and Computer Engineering Department, American University of Beirut; Electrical and Computer Engineering Department, American University of Beirut", + "aff_domain": "mail.aub.edu;aub.edu.lb", + "email": "mail.aub.edu;aub.edu.lb", + "github": "", + "project": "https://gitlab.com/awadailab/faithful-contrastive-explanations", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "American University of Beirut", + "aff_unique_dep": "Electrical and Computer Engineering Department", + "aff_unique_url": "https://www.aub.edu.lb", + "aff_unique_abbr": "AUB", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Lebanon" + }, + { + "id": "2022.emnlp-main.587", + 
"title": "Beyond prompting: Making Pre-trained Language Models Better Zero-shot Learners by Clustering Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has demonstrated that pre-trained language models (PLMs) are zero-shot learners. However, most existing zero-shot methods involve heavy human engineering or complicated self-training pipelines, hindering their application to new situations. In this work, we show that zero-shot text classification can be improved simply by clustering texts in the embedding spaces of PLMs. Specifically, we fit the unlabeled texts with a Bayesian Gaussian Mixture Model after initializing cluster positions and shapes using class names. Despite its simplicity, this approach achieves superior or comparable performance on both topic and sentiment classification datasets and outperforms prior works significantly on unbalanced datasets. We further explore the applicability of our clustering approach by evaluating it on 14 datasets with more diverse topics, text lengths, and numbers of classes. Our approach achieves an average of 20% absolute improvement over prompt-based zero-shot learning. Finally, we compare different PLM embedding spaces and find that texts are well-clustered by topics even if the PLM is not explicitly pre-trained to generate meaningful sentence embeddings. 
This work indicates that PLM embeddings can categorize texts without task-specific fine-tuning, thus providing a new way to analyze and utilize their knowledge and zero-shot learning ability.", + "author": "Yu Fei; Zhao Meng; Ping Nie; Roger Wattenhofer; Mrinmaya Sachan", + "authorids": "/y/yu-fei/; /z/zhao-meng/; /p/ping-nie/; /r/roger-wattenhofer/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{fei-etal-2022-beyond,\n title = \"Beyond prompting: Making Pre-trained Language Models Better Zero-shot Learners by Clustering Representations\",\n author = \"Fei, Yu and\n Meng, Zhao and\n Nie, Ping and\n Wattenhofer, Roger and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.587/\",\n doi = \"10.18653/v1/2022.emnlp-main.587\",\n pages = \"8560--8579\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.587.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.587/", + "pdf_size": 1522715, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14038390894714722688&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "ETH Zurich; ETH Zurich; Peking University; ETH Zurich; ETH Zurich", + "aff_domain": "gmail.com;ethz.ch;pku.edu.cn;ethz.ch;inf.ethz.ch", + "email": "gmail.com;ethz.ch;pku.edu.cn;ethz.ch;inf.ethz.ch", + "github": "https://github.com/fywalter/simptc", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "ETH Zurich;Peking University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ethz.ch;http://www.pku.edu.cn", + "aff_unique_abbr": "ETHZ;Peking U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "Switzerland;China" + }, + { + "id": "2022.emnlp-main.419", + "title": "Bi-Directional Iterative Prompt-Tuning for Event Argument Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, prompt-tuning has attracted growing interests in event argument extraction (EAE). However, the existing prompt-tuning methods have not achieved satisfactory performance due to the lack of consideration of entity information. In this paper, we propose a bi-directional iterative prompt-tuning method for EAE, where the EAE task is treated as a cloze-style task to take full advantage of entity information and pre-trained language models (PLMs). Furthermore, our method explores event argument interactions by introducing the argument roles of contextual entities into prompt construction. Since template and verbalizer are two crucial components in a cloze-style prompt, we propose to utilize the role label semantic knowledge to construct a semantic verbalizer and design three kind of templates for the EAE task. 
Experiments on the ACE 2005 English dataset with standard and low-resource settings show that the proposed method significantly outperforms the peer state-of-the-art methods.", + "author": "Lu Dai; Bang Wang; Wei Xiang; Yijun Mo", + "authorids": "/l/lu-dai/; /b/bang-wang/; /w/wei-xiang/; /y/yijun-mo/", + "bibtex": "@inproceedings{dai-etal-2022-bi,\n title = \"Bi-Directional Iterative Prompt-Tuning for Event Argument Extraction\",\n author = \"Dai, Lu and\n Wang, Bang and\n Xiang, Wei and\n Mo, Yijun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.419/\",\n doi = \"10.18653/v1/2022.emnlp-main.419\",\n pages = \"6251--6263\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.419.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.419/", + "pdf_size": 1414669, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15650126717280968308&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China; School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China; School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China; School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China", + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;hust.edu.cn", + "github": "https://github.com/HustMinsLab/BIP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": 
"Huazhong University of Science and Technology", + "aff_unique_dep": "School of Electronic Information and Communications", + "aff_unique_url": "http://www.hust.edu.cn", + "aff_unique_abbr": "HUST", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.164", + "title": "Bilingual Lexicon Induction for Low-Resource Languages using Graph Matching via Optimal Transport", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Bilingual lexicons form a critical component of various natural language processing applications, including unsupervised and semisupervised machine translation and crosslingual information retrieval. In this work, we improve bilingual lexicon induction performance across 40 language pairs with a graph-matching method based on optimal transport. The method is especially strong with low amounts of supervision.", + "author": "Kelly Marchisio; Ali Saad-Eldin; Kevin Duh; Carey Priebe; Philipp Koehn", + "authorids": "/k/kelly-marchisio/; /a/ali-saad-eldin/; /k/kevin-duh/; /c/carey-priebe/; /p/philipp-koehn/", + "bibtex": "@inproceedings{marchisio-etal-2022-bilingual,\n title = \"Bilingual Lexicon Induction for Low-Resource Languages using Graph Matching via Optimal Transport\",\n author = \"Marchisio, Kelly and\n Saad-Eldin, Ali and\n Duh, Kevin and\n Priebe, Carey and\n Koehn, Philipp\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.164/\",\n doi = \"10.18653/v1/2022.emnlp-main.164\",\n pages = \"2545--2561\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.164.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.164/", + "pdf_size": 835782, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9743306404939999815&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science+Human Language Technology Center of Excellence; Department of Biomedical Engineering; Department of Computer Science+Human Language Technology Center of Excellence; Department of Applied Mathematics and Statistics+Human Language Technology Center of Excellence; Department of Computer Science", + "aff_domain": "jhu.edu;jhu.edu;cs.jhu.edu;jhu.edu;jhu.edu", + "email": "jhu.edu;jhu.edu;cs.jhu.edu;jhu.edu;jhu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;0+1;3+1;0", + "aff_unique_norm": "Unknown Institution;Human Language Technology Center of Excellence;Department of Biomedical Engineering;Department of Applied Mathematics and Statistics", + "aff_unique_dep": "Department of Computer Science;;Biomedical Engineering;Applied Mathematics and Statistics", + "aff_unique_url": ";;;", + "aff_unique_abbr": ";;;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;1", + "aff_country_unique": ";United States" + }, + { + "id": "2022.emnlp-main.548", + "title": "Bilingual Synchronization: Restoring Translational Relationships with Editing Operations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Machine Translation (MT) is usually viewed as a one-shot process that generates the target language equivalent of some source text from scratch. We consider here a more general setting which assumes an initial target sequence, that must be transformed into a valid translation of the source, thereby restoring parallelism between source and target. 
For this bilingual synchronization task, we consider several architectures (both autoregressive and non-autoregressive) and training regimes, and experiment with multiple practical settings such as simulated interactive MT, translating with Translation Memory (TM) and TM cleaning. Our results suggest that one single generic edit-based system, once fine-tuned, can compare with, or even outperform, dedicated systems specifically trained for these tasks.", + "author": "Jitao Xu; Josep Crego; Fran\u00e7ois Yvon", + "authorids": "/j/jitao-xu/; /j/josep-m-crego/; /f/francois-yvon/", + "bibtex": "@inproceedings{xu-etal-2022-bilingual,\n title = \"Bilingual Synchronization: Restoring Translational Relationships with Editing Operations\",\n author = \"Xu, Jitao and\n Crego, Josep and\n Yvon, Fran{\\c{c}}ois\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.548/\",\n doi = \"10.18653/v1/2022.emnlp-main.548\",\n pages = \"8016--8030\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.548.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.548/", + "pdf_size": 360671, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9842369656307864768&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 13, + "aff": "Universit\u00e9 Paris-Saclay, CNRS, LISN, 91400, Orsay, France; SYSTRAN, 5 rue Feydeau, 75002, Paris, France; Universit\u00e9 Paris-Saclay, CNRS, LISN, 91400, Orsay, France", + "aff_domain": "limsi.fr;systrangroup.com;limsi.fr", + "email": "limsi.fr;systrangroup.com;limsi.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Universit\u00e9 
Paris-Saclay;SYSTRAN", + "aff_unique_dep": "LISN;", + "aff_unique_url": "https://www.universite-paris-saclay.fr;", + "aff_unique_abbr": "UPS;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Orsay;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.findings-emnlp.104", + "title": "BioLORD: Learning Ontological Representations from Definitions for Biomedical Concepts and their Textual Descriptions", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This work introduces BioLORD, a new pre-training strategy for producing meaningful representations for clinical sentences and biomedical concepts. State-of-the-art methodologies operate by maximizing the similarity in representation of names referring to the same concept, and preventing collapse through contrastive learning. However, because biomedical names are not always self-explanatory, it sometimes results in non-semantic representations. BioLORD overcomes this issue by grounding its concept representations using definitions, as well as short descriptions derived from a multi-relational knowledge graph consisting of biomedical ontologies. Thanks to this grounding, our model produces more semantic concept representations that match more closely the hierarchical structure of ontologies. 
BioLORD establishes a new state of the art for text similarity on both clinical sentences (MedSTS) and biomedical concepts (MayoSRS).", + "author": "Fran\u00e7ois Remy; Kris Demuynck; Thomas Demeester", + "authorids": "/f/francois-remy/; /k/kris-demuynck/; /t/thomas-demeester/", + "bibtex": "@inproceedings{remy-etal-2022-biolord,\n title = \"{B}io{LORD}: Learning Ontological Representations from Definitions for Biomedical Concepts and their Textual Descriptions\",\n author = \"Remy, Fran{\\c{c}}ois and\n Demuynck, Kris and\n Demeester, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.104/\",\n doi = \"10.18653/v1/2022.findings-emnlp.104\",\n pages = \"1454--1465\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.104.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.104/", + "pdf_size": 423169, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11151853367681935890&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "The Internet and Data Science Lab (IDLab); Ghent University (UGent) - Imec Belgium; Ghent University (UGent) - Imec Belgium", + "aff_domain": "ugent.be; ; ", + "email": "ugent.be; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "The Internet and Data Science Lab;Ghent University", + "aff_unique_dep": "Data Science;", + "aff_unique_url": ";https://www.ugent.be", + "aff_unique_abbr": "IDLab;UGent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1", + "aff_country_unique": ";Belgium" + }, + { + "id": "2022.findings-emnlp.374", + "title": "BioNLI: Generating a 
Biomedical NLI Dataset Using Lexico-semantic Constraints for Adversarial Examples", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural language inference (NLI) is critical in many domains requiring complex decision-making, such as the biomedical domain. We introduce a novel semi-supervised procedure that bootstraps biomedical NLI datasets from positive entailment examples present in abstracts of biomedical publications. We focus on challenging texts where the hypothesis includes mechanistic information such as biochemical interactions between two entities. A key contribution of this work is automating the creation of negative examples that are informative without being simplistic. We generate a range of negative examples using nine strategies that manipulate the structure of the underlying mechanisms both with rules, e.g., flip the roles of the entities in the interaction, and, more importantly, by imposing the perturbed conditions as logical constraints in a neuro-logical decoding system (CITATION).We use this procedure to create a novel dataset for NLI in the biomedical domain, called . The accuracy of neural classifiers on this dataset is in the mid 70s F1, which indicates that this NLI task remains to be solved. 
Critically, we observe that the performance on the different classes of negative examples varies widely, from 97% F1 on the simple negative examples that change the role of the entities in the hypothesis, to barely better than chance on the negative examples generated using neuro-logic decoding.", + "author": "Mohaddeseh Bastan; Mihai Surdeanu; Niranjan Balasubramanian", + "authorids": "/m/mohaddeseh-bastan/; /m/mihai-surdeanu/; /n/niranjan-balasubramanian/", + "bibtex": "@inproceedings{bastan-etal-2022-bionli,\n title = \"{B}io{NLI}: Generating a Biomedical {NLI} Dataset Using Lexico-semantic Constraints for Adversarial Examples\",\n author = \"Bastan, Mohaddeseh and\n Surdeanu, Mihai and\n Balasubramanian, Niranjan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.374/\",\n doi = \"10.18653/v1/2022.findings-emnlp.374\",\n pages = \"5093--5104\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.374.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.374/", + "pdf_size": 188372, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10014825227969147244&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Stony Brook University; University of Arizona; Stony Brook University", + "aff_domain": "cs.stonybrook.edu;email.arizona.edu;cs.stonybrook.edu", + "email": "cs.stonybrook.edu;email.arizona.edu;cs.stonybrook.edu", + "github": "https://github.com/StonyBrookNLP/BioNLI", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Stony Brook University;University of Arizona", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.stonybrook.edu;https://www.arizona.edu", + "aff_unique_abbr": "SBU;UA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.390", + "title": "BioReader: a Retrieval-Enhanced Text-to-Text Transformer for Biomedical Literature", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The latest batch of research has equipped language models with the ability to attend over relevant and factual information from non-parametric external sources, drawing a complementary path to architectural scaling. Besides mastering language, exploiting and contextualizing the latent world knowledge is crucial in complex domains like biomedicine. However, most works in the field rely on general-purpose models supported by databases like Wikipedia and Books. We introduce BioReader, the first retrieval-enhanced text-to-text model for biomedical natural language processing. Our domain-specific T5-based solution augments the input prompt by fetching and assembling relevant scientific literature chunks from a neural database with \u224860 million tokens centered on PubMed. We fine-tune and evaluate BioReader on a broad array of downstream tasks, significantly outperforming several state-of-the-art methods despite using up to 3x fewer parameters. 
In tandem with extensive ablation studies, we show that domain knowledge can be easily altered or supplemented to make the model generate correct predictions bypassing the retraining step and thus addressing the literature overload issue.", + "author": "Giacomo Frisoni; Miki Mizutani; Gianluca Moro; Lorenzo Valgimigli", + "authorids": "/g/giacomo-frisoni/; /m/miki-mizutani/; /g/gianluca-moro/; /l/lorenzo-valgimigli/", + "bibtex": "@inproceedings{frisoni-etal-2022-bioreader,\n title = \"{B}io{R}eader: a Retrieval-Enhanced Text-to-Text Transformer for Biomedical Literature\",\n author = \"Frisoni, Giacomo and\n Mizutani, Miki and\n Moro, Gianluca and\n Valgimigli, Lorenzo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.390/\",\n doi = \"10.18653/v1/2022.emnlp-main.390\",\n pages = \"5770--5793\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.390.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.390/", + "pdf_size": 1086410, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16348134973337191815&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering (DISI), University of Bologna, Via dell\u2019Universit\u00e0 50, I-47522 Cesena, Italy; Department of Computer Science and Engineering (DISI), University of Bologna, Via dell\u2019Universit\u00e0 50, I-47522 Cesena, Italy; Department of Computer Science and Engineering (DISI), University of Bologna, Via dell\u2019Universit\u00e0 50, I-47522 Cesena, Italy; Department of Computer Science and Engineering (DISI), University of Bologna, Via dell\u2019Universit\u00e0 50, I-47522 Cesena, Italy", + 
"aff_domain": "unibo.it;studio.unibo.it;unibo.it;unibo.it", + "email": "unibo.it;studio.unibo.it;unibo.it;unibo.it", + "github": "https://github.com/disi-unibo-nlp", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Bologna", + "aff_unique_dep": "Department of Computer Science and Engineering (DISI)", + "aff_unique_url": "https://www.unibo.it", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cesena", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "2022.emnlp-industry.63", + "title": "Biomedical NER for the Enterprise with Distillated BERN2 and the Kazu Framework", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "In order to assist the drug discovery/development process, pharmaceutical companies often apply biomedical NER and linking techniques over internal and public corpora. Decades of study of the field of BioNLP has produced a plethora of algorithms, systems and datasets. However, our experience has been that no single open source system meets all the requirements of a modern pharmaceutical company. In this work, we describe these requirements according to our experience of the industry, and present Kazu, a highly extensible, scalable open source framework designed to support BioNLP for the pharmaceutical sector. 
Kazu is a built around a computationally efficient version of the BERN2 NER model (TinyBERN2), and subsequently wraps several other BioNLP technologies into one coherent system.", + "author": "Wonjin Yoon; Richard Jackson; Elliot Ford; Vladimir Poroshin; Jaewoo Kang", + "authorids": "/w/wonjin-yoon/; /r/richard-jackson/; /e/elliot-ford/; /v/vladimir-poroshin/; /j/jaewoo-kang/", + "bibtex": "@inproceedings{yoon-etal-2022-biomedical,\n title = \"Biomedical {NER} for the Enterprise with Distillated {BERN}2 and the Kazu Framework\",\n author = \"Yoon, Wonjin and\n Jackson, Richard and\n Ford, Elliot and\n Poroshin, Vladimir and\n Kang, Jaewoo\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.63/\",\n doi = \"10.18653/v1/2022.emnlp-industry.63\",\n pages = \"619--626\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.63.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.63/", + "pdf_size": 547183, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16485129106125129162&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Korea University; AstraZeneca; AstraZeneca; AstraZeneca; Korea University+AIGEN Sciences", + "aff_domain": "korea.ac.kr;astrazeneca.com;astrazeneca.com;astrazeneca.com;korea.ac.kr", + "email": "korea.ac.kr;astrazeneca.com;astrazeneca.com;astrazeneca.com;korea.ac.kr", + "github": "https://github.com/AstraZeneca/KAZU", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0+2", + "aff_unique_norm": "Korea University;AstraZeneca;AIGEN Sciences", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.korea.ac.kr;https://www.astrazeneca.com;", + "aff_unique_abbr": 
"KU;AZ;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;0+2", + "aff_country_unique": "South Korea;United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.154", + "title": "Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Scaling multilingual representation learning beyond the hundred most frequent languages is challenging, in particular to cover the long tail of low-resource languages. We move away from the popular one-for-all multilingual models and focus on training multiple language (family) specific representations, but most prominently enable all languages to still be encoded in the same representational space. We focus on teacher-student training, allowing all encoders to be mutually compatible for bitext mining, and enabling fast learning of new languages. We also combine supervised and self-supervised training, allowing encoders to take advantage of monolingual training data.Our approach significantly outperforms the original LASER encoder. We study very low-resource languages and handle 44 African languages, many of which are not covered by any other model. For these languages, we train sentence encoders and mine bitexts. 
Adding these mined bitexts yielded an improvement of 3.8 BLEU for NMT into English.", + "author": "Kevin Heffernan; Onur \u00c7elebi; Holger Schwenk", + "authorids": "/k/kevin-heffernan/; /o/onur-celebi/; /h/holger-schwenk/", + "bibtex": "@inproceedings{heffernan-etal-2022-bitext,\n title = \"Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages\",\n author = \"Heffernan, Kevin and\n {\\c{C}}elebi, Onur and\n Schwenk, Holger\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.154/\",\n doi = \"10.18653/v1/2022.findings-emnlp.154\",\n pages = \"2101--2112\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.154.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.154/", + "pdf_size": 258833, + "gs_citation": 87, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10804299440060317409&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Meta AI; Meta AI; Meta AI", + "aff_domain": "fb.com;fb.com;fb.com", + "email": "fb.com;fb.com;fb.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.590", + "title": "Bloom Library: Multimodal Datasets in 300+ Languages for a Variety of Downstream Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present Bloom Library, a linguistically diverse set of multimodal and multilingual 
datasets for language modeling, image captioning, visual storytelling, and speech synthesis/recognition. These datasets represent either the most, or among the most, multilingual datasets for each of the included downstream tasks. In total, the initial release of the Bloom Library datasets covers 363 languages across 32 language families. We train downstream task models for various languages represented in the data, showing the viability of the data for future work in low-resource, multimodal NLP and establishing the first known baselines for these downstream tasks in certain languages (e.g., Bisu [bzi], with an estimated population of 700 users). Some of these first-of-their-kind baselines are comparable to state-of-the-art performance for higher-resourced languages. The Bloom Library datasets are released under Creative Commons licenses on the Hugging Face datasets hub to catalyze more linguistically diverse research in the included downstream tasks.", + "author": "Colin Leong; Joshua Nemecek; Jacob Mansdorfer; Anna Filighera; Abraham Owodunni; Daniel Whitenack", + "authorids": "/c/colin-leong/; /j/joshua-nemecek/; /j/jacob-mansdorfer/; /a/anna-filighera/; /a/abraham-owodunni/; /d/daniel-whitenack/", + "bibtex": "@inproceedings{leong-etal-2022-bloom,\n title = \"Bloom Library: Multimodal Datasets in 300+ Languages for a Variety of Downstream Tasks\",\n author = \"Leong, Colin and\n Nemecek, Joshua and\n Mansdorfer, Jacob and\n Filighera, Anna and\n Owodunni, Abraham and\n Whitenack, Daniel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.590/\",\n doi = \"10.18653/v1/2022.emnlp-main.590\",\n pages = \"8608--8621\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.590.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.590/", + "pdf_size": 473224, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10590020253509383364&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Dayton Research Institute; SIL International; Independent Contractor; TU Darmstadt+Masakhane; Masakhane; SIL International", + "aff_domain": "udayton.edu;sil.org;gmail.com;kom.tu-darmstadt.de;gmail.com;sil.org", + "email": "udayton.edu;sil.org;gmail.com;kom.tu-darmstadt.de;gmail.com;sil.org", + "github": "https://github.com/BloomBooks/BloomDesktop", + "project": "https://bloomlibrary.org/", + "author_num": 6, + "aff_unique_index": "0;1;2;3+4;4;1", + "aff_unique_norm": "University of Dayton Research Institute;SIL International;Independent Contractor;Technische Universit\u00e4t Darmstadt;Masakhane", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.udri.udayton.edu/;https://www.sil.org;;https://www.tu-darmstadt.de;https://masakhane.io", + "aff_unique_abbr": "UDRI;SIL;;TU Darmstadt;", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Darmstadt", + "aff_country_unique_index": "0;0;2+3;3;0", + "aff_country_unique": "United States;;Germany;South Africa" + }, + { + "id": "2022.emnlp-main.704", + "title": "Boosting Document-Level Relation Extraction by Mining and Injecting Logical Rules", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Document-level relation extraction (DocRE) aims at extracting relations of all entity pairs in a document. A key challenge to DocRE lies in the complex interdependency between the relations of entity pairs. Unlike most prior efforts focusing on implicitly powerful representations, the recently proposed LogiRE (Ru et al., 2021) explicitly captures the interdependency by learning logical rules. 
However, LogiRE requires extra parameterized modules to reason merely after training backbones, and this disjointed optimization of backbones and extra modules may lead to sub-optimal results. In this paper, we propose MILR, a logic enhanced framework that boosts DocRE by Mining and Injecting Logical Rules. MILR first mines logical rules from annotations based on frequencies. Then in training, consistency regularizationis leveraged as an auxiliary loss to penalize instances that violate mined rules. Finally, MILR infers from a global perspective based on integer programming. Compared with LogiRE, MILR does not introduce extra parameters and injects logical rules during both training and inference. Extensive experiments on two benchmarks demonstrate that MILR not only improves the relation extraction performance (1.1%-3.8% F1) but also makes predictions more logically consistent (over 4.5% Logic). More importantly, MILR also consistently outperforms LogiRE on both counts. Code is available at https://github.com/XingYing-stack/MILR.", + "author": "Shengda Fan; Shasha Mo; Jianwei Niu", + "authorids": "/s/shengda-fan/; /s/shasha-mo/; /j/jianwei-niu/", + "bibtex": "@inproceedings{fan-etal-2022-boosting,\n title = \"Boosting Document-Level Relation Extraction by Mining and Injecting Logical Rules\",\n author = \"Fan, Shengda and\n Mo, Shasha and\n Niu, Jianwei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.704/\",\n doi = \"10.18653/v1/2022.emnlp-main.704\",\n pages = \"10311--10323\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.704.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.704/", + "pdf_size": 498575, + "gs_citation": 
7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12340313025159777960&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Cyber Science and Technology, Beihang University; School of Cyber Science and Technology, Beihang University; Zhongguancun Laboratory + State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/XingYing-stack/MILR", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1+0", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory", + "aff_unique_dep": "School of Cyber Science and Technology;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "Beihang;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.456", + "title": "Boosting Natural Language Generation from Instructions with Meta-Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has shown that language models (LMs) trained with multi-task instructional learning (MTIL) can solve diverse NLP tasks in zero- and few-shot settings with improved performance compared to prompt tuning. MTIL illustrates that LMs can extract and use information about the task from instructions beyond the surface patterns of the inputs and outputs. This suggests that meta-learning may further enhance the utilization of instructions for effective task transfer. In this paper we investigate whether meta-learning applied to MTIL can further improve generalization to unseen tasks in a zero-shot setting. 
Specifically, we propose to adapt meta-learning to MTIL in three directions: 1) Model Agnostic Meta Learning (MAML), 2) Hyper-Network (HNet) based adaptation to generate task specific parameters conditioned on instructions, and 3) an approach combining HNet and MAML. Through extensive experiments on the large scale Natural Instructions V2 dataset, we show that our proposed approaches significantly improve over strong baselines in zero-shot settings. In particular, meta-learning improves the effectiveness of instructions and is most impactful when the test tasks are strictly zero-shot (i.e. no similar tasks in the training set) and are \u201chard\u201d for LMs, illustrating the potential of meta-learning for MTIL for out-of-distribution tasks.", + "author": "Budhaditya Deb; Ahmed Hassan Awadallah; Guoqing Zheng", + "authorids": "/b/budhaditya-deb/; /a/ahmed-hassan/; /g/guoqing-zheng/", + "bibtex": "@inproceedings{deb-etal-2022-boosting,\n title = \"Boosting Natural Language Generation from Instructions with Meta-Learning\",\n author = \"Deb, Budhaditya and\n Awadallah, Ahmed Hassan and\n Zheng, Guoqing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.456/\",\n doi = \"10.18653/v1/2022.emnlp-main.456\",\n pages = \"6792--6808\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.456.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.456/", + "pdf_size": 5226472, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1658982566971901518&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Microsoft Research; Microsoft Research; Microsoft Research", + "aff_domain": 
"microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Microsoft Corporation", + "aff_unique_dep": "Microsoft Research", + "aff_unique_url": "https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.81", + "title": "Bootstrapping meaning through listening: Unsupervised learning of spoken sentence embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Inducing semantic representations directly from speech signals is a highly challenging task but has many useful applications in speech mining and spoken language understanding. This study tackles the unsupervised learning of semantic representations for spoken utterances. Through converting speech signals into hidden units generated from acoustic unit discovery, we propose WavEmbed, a multimodal sequential autoencoder that predicts hidden units from a dense representation of speech. Secondly, we also propose S-HuBERT to induce meaning through knowledge distillation, in which a sentence embedding model is first trained on hidden units and passes its knowledge to a speech encoder through contrastive learning. The best performing model achieves a moderate correlation (0.5 0.6) with human judgments, without relying on any labels or transcriptions. Furthermore, these models can also be easily extended to leverage textual transcriptions of speech to learn much better speech embeddings that are strongly correlated with human annotations. 
Our proposed methods are applicable to the development of purely data-driven systems for speech mining, indexing and search.", + "author": "Jian Zhu; Zuoyu Tian; Yadong Liu; Cong Zhang; Chia-Wen Lo", + "authorids": "/j/jian-zhu/; /z/zuoyu-tian/; /y/yadong-liu/; /c/cong-zhang/; /c/chia-wen-lo/", + "bibtex": "@inproceedings{zhu-etal-2022-bootstrapping,\n title = \"Bootstrapping meaning through listening: Unsupervised learning of spoken sentence embeddings\",\n author = \"Zhu, Jian and\n Tian, Zuoyu and\n Liu, Yadong and\n Zhang, Cong and\n Lo, Chia-Wen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.81/\",\n doi = \"10.18653/v1/2022.findings-emnlp.81\",\n pages = \"1134--1154\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.81.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.81/", + "pdf_size": 1067805, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10195146519590737076&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Michigan, Ann Arbor; Indiana University Bloomington; University of British Columbia; Newcastle University; Max Planck Institute for Human Cognitive and Brain Sciences", + "aff_domain": "umich.edu;iu.edu; ; ; ", + "email": "umich.edu;iu.edu; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "University of Michigan;Indiana University;University of British Columbia;Newcastle University;Max Planck Institute for Human Cognitive and Brain Sciences", + "aff_unique_dep": ";;;;", + "aff_unique_url": 
"https://www.umich.edu;https://www.indiana.edu;https://www.ubc.ca;https://www.ncl.ac.uk;https://www.mpi-cbs.de", + "aff_unique_abbr": "UM;IU;UBC;NU;MPI CBS", + "aff_campus_unique_index": "0;1;2", + "aff_campus_unique": "Ann Arbor;Bloomington;Vancouver;", + "aff_country_unique_index": "0;0;1;2;3", + "aff_country_unique": "United States;Canada;United Kingdom;Germany" + }, + { + "id": "2022.emnlp-main.381", + "title": "Borrowing Human Senses: Comment-Aware Self-Training for Social Media Multimodal Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Social media is daily creating massive multimedia content with paired image and text, presenting the pressing need to automate the vision and language understanding for various multimodal classification tasks. Compared to the commonly researched visual-lingual data, social media posts tend to exhibit more implicit image-text relations. To better glue the cross-modal semantics therein, we capture hinting features from user comments, which are retrieved via jointly leveraging visual and lingual similarity. Afterwards, the classification tasks are explored via self-training in a teacher-student framework, motivated by the usually limited labeled data scales in existing benchmarks. Substantial experiments are conducted on four multimodal social media benchmarks for image-text relation classification, sarcasm detection, sentiment classification, and hate speech detection. 
The results show that our method further advances the performance of previous state-of-the-art models, which do not employ comment modeling or self-training.", + "author": "Chunpu Xu; Jing Li", + "authorids": "/c/chunpu-xu/; /j/jing-li/", + "bibtex": "@inproceedings{xu-li-2022-borrowing,\n title = \"Borrowing Human Senses: Comment-Aware Self-Training for Social Media Multimodal Classification\",\n author = \"Xu, Chunpu and\n Li, Jing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.381/\",\n doi = \"10.18653/v1/2022.emnlp-main.381\",\n pages = \"5644--5656\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.381.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.381/", + "pdf_size": 2021233, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11002247490237274772&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "Department of Computing, The Hong Kong Polytechnic University, China; Department of Computing, The Hong Kong Polytechnic University, China", + "aff_domain": "connect.polyu.hk;polyu.edu.hk", + "email": "connect.polyu.hk;polyu.edu.hk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Hong Kong Polytechnic University", + "aff_unique_dep": "Department of Computing", + "aff_unique_url": "https://www.polyu.edu.hk", + "aff_unique_abbr": "PolyU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.344", + "title": "BotsTalk: Machine-sourced Framework for Automatic Curation of Large-scale Multi-skill Dialogue Datasets", 
+ "track": "main", + "status": "Main", + "award": false, + "abstract": "To build open-domain chatbots that are able to use diverse communicative skills, we propose a novel framework BotsTalk, where multiple agents grounded to the specific target skills participate in a conversation to automatically annotate multi-skill dialogues. We further present Blended Skill BotsTalk (BSBT), a large-scale multi-skill dialogue dataset comprising 300K conversations. Through extensive experiments, we demonstrate that our dataset can be effective for multi-skill dialogue systems which require an understanding of skill blending as well as skill grounding. Our code and data are available at https://github.com/convei-lab/BotsTalk.", + "author": "Minju Kim; Chaehyeong Kim; Yong Ho Song; Seung-won Hwang; Jinyoung Yeo", + "authorids": "/m/minju-kim/; /c/chaehyeong-kim/; /y/yong-ho-song/; /s/seung-won-hwang/; /j/jinyoung-yeo/", + "bibtex": "@inproceedings{kim-etal-2022-botstalk,\n title = \"{B}ots{T}alk: Machine-sourced Framework for Automatic Curation of Large-scale Multi-skill Dialogue Datasets\",\n author = \"Kim, Minju and\n Kim, Chaehyeong and\n Song, Yong Ho and\n Hwang, Seung-won and\n Yeo, Jinyoung\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.344/\",\n doi = \"10.18653/v1/2022.emnlp-main.344\",\n pages = \"5149--5170\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.344.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.344/", + "pdf_size": 1374722, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16389109032873258882&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of 
Artificial Intelligence, Yonsei University; Department of Artificial Intelligence, Yonsei University; Department of Artificial Intelligence, Yonsei University; Department of Computer Science and Engineering, Seoul National University; Department of Artificial Intelligence, Yonsei University", + "aff_domain": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;snu.ac.kr;yonsei.ac.kr", + "email": "yonsei.ac.kr;yonsei.ac.kr;yonsei.ac.kr;snu.ac.kr;yonsei.ac.kr", + "github": "https://github.com/convei-lab/BotsTalk", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Yonsei University;Seoul National University", + "aff_unique_dep": "Department of Artificial Intelligence;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.yonsei.ac.kr;https://www.snu.ac.kr", + "aff_unique_abbr": "Yonsei;SNU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Seoul", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.435", + "title": "Boundary-Driven Table-Filling for Aspect Sentiment Triplet Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect Sentiment Triplet Extraction (ASTE) aims to extract the aspect terms along with the corresponding opinion terms and the expressed sentiments in the review, which is an important task in sentiment analysis. Previous research efforts generally address the ASTE task in an end-to-end fashion through the table-filling formalization, in which the triplets are represented by a two-dimensional (2D) table of word-pair relations. Under this formalization, a term-level relation is decomposed into multiple independent word-level relations, which leads to relation inconsistency and boundary insensitivity in the face of multi-word aspect terms and opinion terms. 
To overcome these issues, we propose Boundary-Driven Table-Filling (BDTF), which represents each triplet as a relation region in the 2D table and transforms the ASTE task into detection and classification of relation regions. We also notice that the quality of the table representation greatly affects the performance of BDTF. Therefore, we develop an effective relation representation learning approach to learn the table representation, which can fully exploit both word-to-word interactions and relation-to-relation interactions. Experiments on several public benchmarks show that the proposed approach achieves state-of-the-art performances.", + "author": "Yice Zhang; Yifan Yang; Yihui Li; Bin Liang; Shiwei Chen; Yixue Dang; Min Yang; Ruifeng Xu", + "authorids": "/y/yice-zhang/; /y/yifan-yang/; /y/yihui-li/; /b/bin-liang/; /s/shiwei-chen/; /y/yixue-dang/; /m/min-yang/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{zhang-etal-2022-boundary,\n title = \"Boundary-Driven Table-Filling for Aspect Sentiment Triplet Extraction\",\n author = \"Zhang, Yice and\n Yang, Yifan and\n Li, Yihui and\n Liang, Bin and\n Chen, Shiwei and\n Dang, Yixue and\n Yang, Min and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.435/\",\n doi = \"10.18653/v1/2022.emnlp-main.435\",\n pages = \"6485--6498\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.435.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.435/", + "pdf_size": 862633, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15173510811780977143&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Harbin Institute of Technology, Shenzhen, 
China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China; Joint Lab of HITSZ and China Merchants Securities; SIAT, Chinese Academy of Sciences, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "163.com;163.com;163.com;stu.hit.edu.cn;pcl.ac.cn;cmschina.com.cn;siat.ac.cn;hit.edu.cn", + "email": "163.com;163.com;163.com;stu.hit.edu.cn;pcl.ac.cn;cmschina.com.cn;siat.ac.cn;hit.edu.cn", + "github": "https://github.com/HITSZ-HLT/BDTF-ABSA", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2;0+2;3;4;0+1+2", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Peng Cheng Laboratory;Harbin Institute of Technology Shenzhen;Shenzhen Institute of Advanced Technology", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;Joint Lab;", + "aff_unique_url": "http://en.hhit.edu.cn/;;;http://en.hitsz.edu.cn/;http://www.siat.ac.cn", + "aff_unique_abbr": "HIT;;;HITSZ;SIAT", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0;0;0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0;0+0;0;0;0+0+0", + 
"aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.472", + "title": "Break it Down into BTS: Basic, Tiniest Subword Units for Korean", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce Basic, Tiniest Subword (BTS) units for the Korean language, which are inspired by the invention principle of Hangeul, the Korean writing system. Instead of relying on 51 Korean consonant and vowel letters, we form the letters from BTS units by adding strokes or combining them. To examine the impact of BTS units on Korean language processing, we develop a novel BTS-based word embedding framework that is readily applicable to various models. Our experiments reveal that BTS units significantly improve the performance of Korean word embedding on all intrinsic and extrinsic tasks in our evaluation. In particular, BTS-based word embedding outperforms the state-of-theart Korean word embedding by 11.8% in word analogy. We further investigate the unique advantages provided by BTS units through indepth analysis.", + "author": "Nayeon Kim; Jun-Hyung Park; Joon-Young Choi; Eojin Jeon; Youjin Kang; SangKeun Lee", + "authorids": "/n/nayeon-kim/; /j/jun-hyung-park/; /j/joon-young-choi/; /e/eojin-jeon/; /y/youjin-kang/; /s/sangkeun-lee/", + "bibtex": "@inproceedings{kim-etal-2022-break,\n title = \"Break it Down into {BTS}: Basic, Tiniest Subword Units for {K}orean\",\n author = \"Kim, Nayeon and\n Park, Jun-Hyung and\n Choi, Joon-Young and\n Jeon, Eojin and\n Kang, Youjin and\n Lee, SangKeun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.472/\",\n doi = \"10.18653/v1/2022.emnlp-main.472\",\n pages = \"7007--7024\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.472.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.472/", + "pdf_size": 2910015, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2343088287054421506&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering+Department of Artificial Intelligence, Korea University; Department of Computer Science and Engineering+Department of Artificial Intelligence, Korea University; Department of Artificial Intelligence, Korea University; Department of Artificial Intelligence, Korea University; Department of Computer Science and Engineering, Korea University; Department of Computer Science and Engineering+Department of Artificial Intelligence, Korea University", + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "https://github.com/irishev/BTS", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;1;1;1;0+1", + "aff_unique_norm": "University of California, San Diego;Korea University", + "aff_unique_dep": "Department of Computer Science and Engineering;Department of Artificial Intelligence", + "aff_unique_url": "https://cse.ucsd.edu;https://www.korea.ac.kr", + "aff_unique_abbr": "UCSD CSE;KU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;1;1;1;0+1", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "2022.emnlp-main.434", + "title": "Breaking the Representation Bottleneck of Chinese Characters: Neural Machine Translation with Stroke Sequence Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing research generally treats Chinese character as a minimum unit for representation. 
However, such Chinese character representation will suffer two bottlenecks: 1) Learning bottleneck, the learning cannot benefit from its rich internal features (e.g., radicals and strokes); and 2) Parameter bottleneck, each individual character has to be represented by a unique vector. In this paper, we introduce a novel representation method for Chinese characters to break the bottlenecks, namely StrokeNet, which represents a Chinese character by a Latinized stroke sequence (e.g., \u201c\u51f9 (concave)\u201d to \u201cajaie\u201d and \u201c\u51f8 (convex)\u201d to \u201caeaqe\u201d). Specifically, StrokeNet maps each stroke to a specific Latin character, thus allowing similar Chinese characters to have similar Latin representations. With the introduction of StrokeNet to neural machine translation (NMT), many powerful but not applicable techniques to non-Latin languages (e.g., shared subword vocabulary learning and ciphertext-based data augmentation) can now be perfectly implemented. Experiments on the widely-used NIST Chinese-English, WMT17 Chinese-English and IWSLT17 Japanese-English NMT tasks show that StrokeNet can provide a significant performance boost over the strong baselines with fewer model parameters, achieving 26.5 BLEU on the WMT17 Chinese-English task which is better than any previously reported results without using monolingual data. 
Code and scripts are freely available at https://github.com/zjwang21/StrokeNet.", + "author": "Zhijun Wang; Xuebo Liu; Min Zhang", + "authorids": "/z/zhijun-wang/; /x/xuebo-liu/; /m/min-zhang/", + "bibtex": "@inproceedings{wang-etal-2022-breaking,\n title = \"Breaking the Representation Bottleneck of {C}hinese Characters: Neural Machine Translation with Stroke Sequence Modeling\",\n author = \"Wang, Zhijun and\n Liu, Xuebo and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.434/\",\n doi = \"10.18653/v1/2022.emnlp-main.434\",\n pages = \"6473--6484\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.434.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.434/", + "pdf_size": 783646, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10524937214015232144&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China", + "aff_domain": "stu.hit.edu.cn;hit.edu.cn;hit.edu.cn", + "email": "stu.hit.edu.cn;hit.edu.cn;hit.edu.cn", + "github": "https://github.com/zjwang21/StrokeNet", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "Institute of Computing and Intelligence", + "aff_unique_url": "http://www.hhit.edu.cn", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shenzhen", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.658", + "title": "Breakpoint Transformers for Modeling and Tracking Intermediate Beliefs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Can we teach models designed for language understanding tasks to track and improve their beliefs through intermediate points in text? Besides making their inner workings more transparent, this would also help make models more reliable and consistent. To this end, we propose a representation learning framework called breakpoint modeling that allows for efficient and robust learning of this type. Given any text encoder and data marked with intermediate states (breakpoints) along with corresponding textual queries viewed as true/false propositions (i.e., the candidate intermediate beliefs of a model), our approach trains models in an efficient and end-to-end fashion to build intermediate representations that facilitate direct querying and training of beliefs at arbitrary points in text, alongside solving other end-tasks. We evaluate breakpoint modeling on a diverse set of NLU tasks including relation reasoning on Cluttr and narrative understanding on bAbI. Using novel proposition prediction tasks alongside these end-tasks, we show the benefit of our T5-based breakpoint transformer over strong conventional representation learning approaches in terms of processing efficiency, belief accuracy, and belief consistency, all with minimal to no degradation on the end-task. 
To show the feasibility of incorporating our belief tracker into more complex reasoning pipelines, we also obtain state-of-the-art performance on the three-tiered reasoning challenge for the recent TRIP benchmark (23-32% absolute improvement on Tasks 2-3).", + "author": "Kyle Richardson; Ronen Tamari; Oren Sultan; Dafna Shahaf; Reut Tsarfaty; Ashish Sabharwal", + "authorids": "/k/kyle-richardson/; /r/ronen-tamari/; /o/oren-sultan/; /d/dafna-shahaf/; /r/reut-tsarfaty/; /a/ashish-sabharwal/", + "bibtex": "@inproceedings{richardson-etal-2022-breakpoint,\n title = \"Breakpoint Transformers for Modeling and Tracking Intermediate Beliefs\",\n author = \"Richardson, Kyle and\n Tamari, Ronen and\n Sultan, Oren and\n Shahaf, Dafna and\n Tsarfaty, Reut and\n Sabharwal, Ashish\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.658/\",\n doi = \"10.18653/v1/2022.emnlp-main.658\",\n pages = \"9703--9719\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.658.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.658/", + "pdf_size": 1015453, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7512185385647051647&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "The Hebrew University of Jerusalem+Allen Institute for AI; The Hebrew University of Jerusalem+Allen Institute for AI; The Hebrew University of Jerusalem; Bar-Ilan University+Allen Institute for AI; The Hebrew University of Jerusalem; Allen Institute for AI", + "aff_domain": "allenai.org;cs.huji.ac.il;cs.huji.ac.il;allenai.org;cs.huji.ac.il;allenai.org", + "email": "allenai.org;cs.huji.ac.il;cs.huji.ac.il;allenai.org;cs.huji.ac.il;allenai.org", 
+ "github": "https://github.com/allenai/situation_modeling", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0;2+1;0;1", + "aff_unique_norm": "The Hebrew University of Jerusalem;Allen Institute for AI;Bar-Ilan University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.huji.ac.il;https://allenai.org;https://www.biu.ac.il", + "aff_unique_abbr": "HUJI;AI2;BIU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0;0+1;0;1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.emnlp-main.533", + "title": "Bridging Fairness and Environmental Sustainability in Natural Language Processing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fairness and environmental impact are important research directions for the sustainable development of artificial intelligence. However, while each topic is an active research area in natural language processing (NLP), there is a surprising lack of research on the interplay between the two fields. This lacuna is highly problematic, since there is increasing evidence that an exclusive focus on fairness can actually hinder environmental sustainability, and vice versa. In this work, we shed light on this crucial intersection in NLP by (1) investigating the efficiency of current fairness approaches through surveying example methods for reducing unfair stereotypical bias from the literature, and (2) evaluating a common technique to reduce energy consumption (and thus environmental impact) of English NLP models, knowledge distillation (KD), for its impact on fairness. 
In this case study, we evaluate the effect of important KD factors, including layer and dimensionality reduction, with respect to: (a) performance on the distillation task (natural language inference and semantic similarity prediction), and (b) multiple measures and dimensions of stereotypical bias (e.g., gender bias measured via the Word Embedding Association Test). Our results lead us to clarify current assumptions regarding the effect of KD on unfair bias: contrary to other findings, we show that KD can actually decrease model fairness.", + "author": "Marius Hessenthaler; Emma Strubell; Dirk Hovy; Anne Lauscher", + "authorids": "/m/marius-hessenthaler/; /e/emma-strubell/; /d/dirk-hovy/; /a/anne-lauscher/", + "bibtex": "@inproceedings{hessenthaler-etal-2022-bridging,\n title = \"Bridging Fairness and Environmental Sustainability in Natural Language Processing\",\n author = \"Hessenthaler, Marius and\n Strubell, Emma and\n Hovy, Dirk and\n Lauscher, Anne\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.533/\",\n doi = \"10.18653/v1/2022.emnlp-main.533\",\n pages = \"7817--7836\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.533.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.533/", + "pdf_size": 1873517, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4294667923590731092&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Data and Web Science Group, University of Mannheim, Germany; Language Technologies Institute, Carnegie Mellon University, U.S.; MilaNLP, Bocconi University, Italy; Data Science Group, University of Hamburg, Germany", + "aff_domain": 
"web.de;cmu.edu;unibocconi.it;uni-hamburg.de", + "email": "web.de;cmu.edu;unibocconi.it;uni-hamburg.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Mannheim;Carnegie Mellon University;Bocconi University;University of Hamburg", + "aff_unique_dep": "Data and Web Science Group;Language Technologies Institute;MilaNLP;Data Science Group", + "aff_unique_url": "https://www.uni-mannheim.de;https://www.cmu.edu;https://www.bocconi.edu;https://www.uni-hamburg.de", + "aff_unique_abbr": ";CMU;;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0", + "aff_country_unique": "Germany;United States;Italy" + }, + { + "id": "2022.findings-emnlp.272", + "title": "Bridging the Training-Inference Gap for Dense Phrase Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Building dense retrievers requires a series of standard procedures, including training and validating neural models and creating indexes for efficient search. However, these procedures are often misaligned in that training objectives do not exactly reflect the retrieval scenario at inference time. In this paper, we explore how the gap between training and inference in dense retrieval can be reduced, focusing on dense phrase retrieval (Lee et al., 2021) where billions of representations are indexed at inference. Since validating every dense retriever with a large-scale index is practically infeasible, we propose an efficient way of validating dense retrievers using a small subset of the entire corpus. This allows us to validate various training strategies including unifying contrastive loss terms and using hard negatives for phrase retrieval, which largely reduces the training-inference discrepancy. As a result, we improve top-1 phrase retrieval accuracy by 2 3 points and top-20 passage retrieval accuracy by 2 4 points for open-domain question answering. 
Our work urges modeling dense retrievers with careful consideration of training and inference via efficient validation while advancing phrase retrieval as a general solution for dense retrieval.", + "author": "Gyuwan Kim; Jinhyuk Lee; Barlas Oguz; Wenhan Xiong; Yizhe Zhang; Yashar Mehdad; William Yang Wang", + "authorids": "/g/gyuwan-kim/; /j/jinhyuk-lee/; /b/barlas-oguz/; /w/wenhan-xiong/; /y/yizhe-zhang/; /y/yashar-mehdad/; /w/william-yang-wang/", + "bibtex": "@inproceedings{kim-etal-2022-bridging,\n title = \"Bridging the Training-Inference Gap for Dense Phrase Retrieval\",\n author = \"Kim, Gyuwan and\n Lee, Jinhyuk and\n Oguz, Barlas and\n Xiong, Wenhan and\n Zhang, Yizhe and\n Mehdad, Yashar and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.272/\",\n doi = \"10.18653/v1/2022.findings-emnlp.272\",\n pages = \"3713--3724\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.272.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.272/", + "pdf_size": 688276, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4099709048875276663&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of California, Santa Barbara; Korea University; Meta AI; Meta AI; Meta AI; Meta AI; University of California, Santa Barbara", + "aff_domain": "ucsb.edu;korea.ac.kr;fb.com;fb.com;fb.com;fb.com;cs.ucsb.edu", + "email": "ucsb.edu;korea.ac.kr;fb.com;fb.com;fb.com;fb.com;cs.ucsb.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;2;2;2;0", + "aff_unique_norm": "University of California, Santa Barbara;Korea University;Meta Platforms, Inc.", + 
"aff_unique_dep": ";;Meta AI", + "aff_unique_url": "https://www.ucsb.edu;https://www.korea.ac.kr;https://meta.com", + "aff_unique_abbr": "UCSB;KU;Meta", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;1;0;0;0;0;0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "2022.emnlp-industry.44", + "title": "Bringing the State-of-the-Art to Customers: A Neural Agent Assistant Framework for Customer Service Support", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Building Agent Assistants that can help improve customer service support requires inputs from industry users and their customers, as well as knowledge about state-of-the-art Natural Language Processing (NLP) technology. We combine expertise from academia and industry to bridge the gap and build task/domain-specific Neural Agent Assistants (NAA) with three high-level components for: (1) Intent Identification, (2) Context Retrieval, and (3) Response Generation. In this paper, we outline the pipeline of the NAA\u2019s core system and also present three case studies in which three industry partners successfully adapt the framework to find solutions to their unique challenges. Our findings suggest that a collaborative process is instrumental in spurring the development of emerging NLP models for Conversational AI tasks in industry. 
The full reference implementation code and results are available at https://github.com/VectorInstitute/NAA.", + "author": "Stephen Obadinma; Faiza Khan Khattak; Shirley Wang; Tania Sidhorn; Elaine Lau; Sean Robertson; Jingcheng Niu; Winnie Au; Alif Munim; Karthik Raja Kalaiselvi Bhaskar", + "authorids": "/s/stephen-obadinma/; /f/faiza-khan-khattak/; /s/shirley-wang/; /t/tania-sidhorn/; /e/elaine-lau/; /s/sean-robertson/; /j/jingcheng-niu/; /w/winnie-au/; /a/alif-munim/; /k/karthik-raja-kalaiselvi-bhaskar/", + "bibtex": "@inproceedings{obadinma-etal-2022-bringing,\n title = \"Bringing the State-of-the-Art to Customers: A Neural Agent Assistant Framework for Customer Service Support\",\n author = \"Obadinma, Stephen and\n Khan Khattak, Faiza and\n Wang, Shirley and\n Sidhorn, Tania and\n Lau, Elaine and\n Robertson, Sean and\n Niu, Jingcheng and\n Au, Winnie and\n Munim, Alif and\n Kalaiselvi Bhaskar, Karthik Raja\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.44/\",\n doi = \"10.18653/v1/2022.emnlp-industry.44\",\n pages = \"440--450\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.44.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.44/", + "pdf_size": 331749, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4456356280962819707&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10 + }, + { + "id": "2022.findings-emnlp.260", + "title": "CANarEx: Contextually Aware Narrative Extraction for Semantically Rich Text-as-data Applications", + "track": "main", + "status": 
"finding", + "award": false, + "abstract": "Narrative modelling is an area of active research, motivated by the acknowledgement of narratives as drivers of societal decision making. These research efforts conceptualize narratives as connected entity chains, and modeling typically focuses on the identification of entities and their connections within a text. An emerging approach to narrative modelling is the use of semantic role labeling (SRL) to extract Entity-Verb-Entity (E-V-Es) tuples from a text, followed by dimensionality reduction to reduce the space of entities and connections separately. This process penalises the semantic richness of narratives and discards much contextual information along the way. Here, we propose an alternate narrative extraction approach - CANarEx, incorporating a pipeline of common contextual constructs through co-reference resolution, micro-narrative generation and clustering of these narratives through sentence embeddings. We evaluate our approach through testing the recovery of \u201cnarrative time-series clusters\u201d, mimicking a desirable text-as-data task. The evaluation framework leverages synthetic data generated using a GPT-3 model. The GPT-3 model is trained to generate similar sentences using a large dataset of news articles. The synthetic data maps to three topics in the news dataset. We then generate narrative time-series document cluster representations by mapping the synthetic data to three distinct signals synthetically injected into the testing corpus. Evaluation results demonstrate the superior ability of CANarEx to recover narrative time-series through reduced MSE and improved precision/recall relative to existing methods. 
The validity is further reinforced through ablation studies and qualitative analysis.", + "author": "Nandini Anantharama; Simon Angus; Lachlan O\u2019Neill", + "authorids": "/n/nandini-anantharama/; /s/simon-angus/; /l/lachlan-oneill/", + "bibtex": "@inproceedings{anantharama-etal-2022-canarex,\n title = \"{CAN}ar{E}x: Contextually Aware Narrative Extraction for Semantically Rich Text-as-data Applications\",\n author = \"Anantharama, Nandini and\n Angus, Simon and\n O{'}Neill, Lachlan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.260/\",\n doi = \"10.18653/v1/2022.findings-emnlp.260\",\n pages = \"3551--3564\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.260.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.260/", + "pdf_size": 2320328, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10018149070889912668&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "SoDa Laboratories; Dept. 
of Economics + SoDa Laboratories; Faculty of IT, Monash University, Australia", + "aff_domain": "monash.edu;monash.edu;monash.edu", + "email": "monash.edu;monash.edu;monash.edu", + "github": "https://github.com/nandinisa/CANarEx", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+0;2", + "aff_unique_norm": "SoDa Laboratories;University Affiliation Not Specified;Monash University", + "aff_unique_dep": ";Department of Economics;Faculty of IT", + "aff_unique_url": "http://www.soda-labs.fr/;;https://www.monash.edu", + "aff_unique_abbr": ";;Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;2", + "aff_country_unique": "France;;Australia" + }, + { + "id": "2022.findings-emnlp.51", + "title": "CARE: Causality Reasoning for Empathetic Responses by Conditional Graph Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent approaches to empathetic response generation incorporate emotion causalities to enhance comprehension of both the user\u2019s feelings and experiences. However, these approaches suffer from two critical issues. First, they only consider causalities between the user\u2019s emotion and the user\u2019s experiences, and ignore those between the user\u2019s experiences. Second, they neglect interdependence among causalities and reason them independently. To solve the above problems, we expect to reason all plausible causalities interdependently and simultaneously, given the user\u2019s emotion, dialogue history, and future dialogue content. Then, we infuse these causalities into response generation for empathetic responses. Specifically, we design a new model, i.e., the Conditional Variational Graph Auto-Encoder (CVGAE), for the causality reasoning, and adopt a multi-source attention mechanism in the decoder for the causality infusion. We name the whole framework as CARE, abbreviated for CAusality Reasoning for Empathetic conversation. 
Experimental results indicate that our method achieves state-of-the-art performance.", + "author": "Jiashuo Wang; Yi Cheng; Wenjie Li", + "authorids": "/j/jiashuo-wang/; /y/yi-cheng/; /w/wenjie-li/", + "bibtex": "@inproceedings{wang-etal-2022-care,\n title = \"{CARE}: Causality Reasoning for Empathetic Responses by Conditional Graph Generation\",\n author = \"Wang, Jiashuo and\n Cheng, Yi and\n Li, Wenjie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.51/\",\n doi = \"10.18653/v1/2022.findings-emnlp.51\",\n pages = \"729--741\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.51.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.51/", + "pdf_size": 584374, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11509182178035343153&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Hong Kong Polytechnic University; Hong Kong Polytechnic University; Hong Kong Polytechnic University", + "aff_domain": "comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk", + "email": "comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk", + "github": "https://github.com/wangjs9/CARE-master", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Hong Kong Polytechnic University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.polyu.edu.hk", + "aff_unique_abbr": "PolyU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.295", + "title": "CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code 
Structure", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Code pre-trained models (CodePTMs) have recently demonstrated significant success in code intelligence. To interpret these models, some probing methods have been applied. However, these methods fail to consider the inherent characteristics of codes. In this paper, to address the problem, we propose a novel probing method CAT-probing to quantitatively interpret how CodePTMs attend code structure. We first denoise the input code sequences based on the token types pre-defined by the compilers to filter those tokens whose attention scores are too small. After that, we define a new metric CAT-score to measure the commonality between the token-level attention scores generated in CodePTMs and the pair-wise distances between corresponding AST nodes. The higher the CAT-score, the stronger the ability of CodePTMs to capture code structure. We conduct extensive experiments to integrate CAT-probing with representative CodePTMs for different programming languages. Experimental results show the effectiveness of CAT-probing in CodePTM interpretation. 
Our codes and data are publicly available at https://github.com/nchen909/CodeAttention.", + "author": "Nuo Chen; Qiushi Sun; Renyu Zhu; Xiang Li; Xuesong Lu; Ming Gao", + "authorids": "/n/nuo-chen/; /q/qiushi-sun/; /r/renyu-zhu/; /x/xiang-li/; /x/xuesong-lu/; /m/ming-gao/", + "bibtex": "@inproceedings{chen-etal-2022-cat,\n title = \"{CAT}-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure\",\n author = \"Chen, Nuo and\n Sun, Qiushi and\n Zhu, Renyu and\n Li, Xiang and\n Lu, Xuesong and\n Gao, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.295/\",\n doi = \"10.18653/v1/2022.findings-emnlp.295\",\n pages = \"4000--4008\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.295.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.295/", + "pdf_size": 567271, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8820425489310263595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal University, Shanghai, China", + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;dase.ecnu.edu.cn;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "email": 
"stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;dase.ecnu.edu.cn;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "github": "https://github.com/nchen909/CodeAttention", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Data Science and Engineering", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.2", + "title": "CDConv: A Benchmark for Contradiction Detection in Chinese Conversations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dialogue contradiction is a critical issue in open-domain dialogue systems. The contextualization nature of conversations makes dialogue contradiction detection rather challenging. In this work, we propose a benchmark for Contradiction Detection in Chinese Conversations, namely CDConv. It contains 12K multi-turn conversations annotated with three typical contradiction categories: Intra-sentence Contradiction, Role Confusion, and History Contradiction. To efficiently construct the CDConv conversations, we devise a series of methods for automatic conversation generation, which simulate common user behaviors that trigger chatbots to make contradictions. We conduct careful manual quality screening of the constructed conversations and show that state-of-the-art Chinese chatbots can be easily goaded into making contradictions. 
Experiments on CDConv show that properly modeling contextual information is critical for dialogue contradiction detection, but there are still unresolved challenges that require future research.", + "author": "Chujie Zheng; Jinfeng Zhou; Yinhe Zheng; Libiao Peng; Zhen Guo; Wenquan Wu; Zheng-Yu Niu; Hua Wu; Minlie Huang", + "authorids": "/c/chujie-zheng/; /j/jinfeng-zhou/; /y/yinhe-zheng/; /l/libiao-peng/; /z/zhen-guo/; /w/wenquan-wu/; /z/zheng-yu-niu/; /h/hua-wu/; /m/minlie-huang/", + "bibtex": "@inproceedings{zheng-etal-2022-cdconv,\n title = \"{CDC}onv: A Benchmark for Contradiction Detection in {C}hinese Conversations\",\n author = \"Zheng, Chujie and\n Zhou, Jinfeng and\n Zheng, Yinhe and\n Peng, Libiao and\n Guo, Zhen and\n Wu, Wenquan and\n Niu, Zheng-Yu and\n Wu, Hua and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.2/\",\n doi = \"10.18653/v1/2022.emnlp-main.2\",\n pages = \"18--29\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.2.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.2/", + "pdf_size": 1888232, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4900918300156290577&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "https://www.github.com/thu-coai/CDConv; https://github.com/PaddlePaddle/Knover/tree/dygraph/projects/cdconv", + "project": "", + "author_num": 9 + }, + { + "id": "2022.findings-emnlp.429", + "title": "CDGP: Automatic Cloze Distractor Generation based on Pre-trained Language Model", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Manually designing cloze test consumes enormous time and efforts. The major challenge lies in wrong option (distractor) selection. Having carefully-design distractors improves the effectiveness of learner ability assessment. As a result, the idea of automatically generating cloze distractor is motivated. In this paper, we investigate cloze distractor generation by exploring the employment of pre-trained language models (PLMs) as an alternative for candidate distractor generation. Experiments show that the PLM-enhanced model brings a substantial performance improvement. Our best performing model advances the state-of-the-art result from 14.94 to 34.17 (NDCG@10 score). Our code and dataset is available at https://github.com/AndyChiangSH/CDGP.", + "author": "Shang-Hsuan Chiang; Ssu-Cheng Wang; Yao-Chung Fan", + "authorids": "/s/shang-hsuan-chiang/; /s/ssu-cheng-wang/; /y/yao-chung-fan/", + "bibtex": "@inproceedings{chiang-etal-2022-cdgp,\n title = \"{CDGP}: Automatic Cloze Distractor Generation based on Pre-trained Language Model\",\n author = \"Chiang, Shang-Hsuan and\n Wang, Ssu-Cheng and\n Fan, Yao-Chung\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.429/\",\n doi = \"10.18653/v1/2022.findings-emnlp.429\",\n pages = \"5835--5840\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.429.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.429/", + "pdf_size": 306853, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14959884355300431115&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 3, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + 
"author_num": 3 + }, + { + "id": "2022.emnlp-main.416", + "title": "CEFR-Based Sentence Difficulty Annotation and Assessment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Controllable text simplification is a crucial assistive technique for language learning and teaching. One of the primary factors hindering its advancement is the lack of a corpus annotated with sentence difficulty levels based on language ability descriptions. To address this problem, we created the CEFR-based Sentence Profile (CEFR-SP) corpus, containing 17k English sentences annotated with the levels based on the Common European Framework of Reference for Languages assigned by English-education professionals. In addition, we propose a sentence-level assessment model to handle unbalanced level distribution because the most basic and highly proficient sentences are naturally scarce. In the experiments in this study, our method achieved a macro-F1 score of 84.5% in the level assessment, thus outperforming strong baselines employed in readability assessment.", + "author": "Yuki Arase; Satoru Uchida; Tomoyuki Kajiwara", + "authorids": "/y/yuki-arase/; /s/satoru-uchida/; /t/tomoyuki-kajiwara/", + "bibtex": "@inproceedings{arase-etal-2022-cefr,\n title = \"{CEFR}-Based Sentence Difficulty Annotation and Assessment\",\n author = \"Arase, Yuki and\n Uchida, Satoru and\n Kajiwara, Tomoyuki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.416/\",\n doi = \"10.18653/v1/2022.emnlp-main.416\",\n pages = \"6206--6219\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.416.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.416/", + "pdf_size": 
423030, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16688229836650323145&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Graduate School of Information Science and Technology, Osaka University, Japan; Faculty of Languages and Cultures, Kyushu University, Japan; Graduate School of Science and Engineering, Ehime University, Japan", + "aff_domain": "ist.osaka-u.ac.jp;flc.kyushu-u.ac.jp;cs.ehime-u.ac.jp", + "email": "ist.osaka-u.ac.jp;flc.kyushu-u.ac.jp;cs.ehime-u.ac.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Osaka University;Kyushu University;Ehime University", + "aff_unique_dep": "Graduate School of Information Science and Technology;Faculty of Languages and Cultures;Graduate School of Science and Engineering", + "aff_unique_url": "https://www.osaka-u.ac.jp;https://www.kyushu-u.ac.jp;https://www.ehime-u.ac.jp", + "aff_unique_abbr": "Osaka U;Kyushu U;Ehime U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.213", + "title": "CEM: Machine-Human Chatting Handoff via Causal-Enhance Module", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aiming to ensure chatbot quality by predicting chatbot failure and enabling human-agent collaboration, Machine-Human Chatting Handoff (MHCH) has attracted lots of attention from both industry and academia in recent years. However, most existing methods mainly focus on the dialogue context or assist with global satisfaction prediction based on multi-task learning, which ignore the grounded relationships among the causal variables, like the user state and labor cost. These variables are significantly associated with handoff decisions, resulting in prediction bias and cost increasement. 
Therefore, we propose Causal-Enhance Module (CEM) by establishing the causal graph of MHCH based on these two variables, which is a simple yet effective module and can be easy to plug into the existing MHCH methods. For the impact of users, we use the user state to correct the prediction bias according to the causal relationship of multi-task. For the labor cost, we train an auxiliary cost simulator to calculate unbiased labor cost through counterfactual learning so that a model becomes cost-aware.Extensive experiments conducted on four real-world benchmarks demonstrate the effectiveness of CEM in generally improving the performance of existing MHCH methods without any elaborated model crafting.", + "author": "Shanshan Zhong; Jinghui Qin; Zhongzhan Huang; Daifeng Li", + "authorids": "/s/shanshan-zhong/; /j/jinghui-qin/; /z/zhongzhan-huang/; /d/daifeng-li/", + "bibtex": "@inproceedings{zhong-etal-2022-cem,\n title = \"{CEM}: Machine-Human Chatting Handoff via Causal-Enhance Module\",\n author = \"Zhong, Shanshan and\n Qin, Jinghui and\n Huang, Zhongzhan and\n Li, Daifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.213/\",\n doi = \"10.18653/v1/2022.emnlp-main.213\",\n pages = \"3242--3253\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.213.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.213/", + "pdf_size": 857068, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15989898558925515126&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science and Engineering, Sun Yat-sen University; School of Computer Science and Engineering, Sun Yat-sen University; 
School of Computer Science and Engineering, Sun Yat-sen University; School of Information Management, Sun Yat-sen University", + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Sun Yat-sen University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "http://www.sysu.edu.cn", + "aff_unique_abbr": "SYSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.48", + "title": "CGF: Constrained Generation Framework for Query Rewriting in Conversational AI", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "In conversational AI agents, Query Rewriting (QR) plays a crucial role in reducing user frictions and satisfying their daily demands. User frictions are caused by various reasons, such as errors in the conversational AI system, users\u2019 accent or their abridged language. In this work, we present a novel Constrained Generation Framework (CGF) for query rewriting at both global and personalized levels. It is based on the encoder-decoder framework, where the encoder takes the query and its previous dialogue turns as the input to form a context-enhanced representation, and the decoder uses constrained decoding to generate the rewrites based on the pre-defined global or personalized constrained decoding space. 
Extensive offline and online A/B experiments show that the proposed CGF significantly boosts the query rewriting performance.", + "author": "Jie Hao; Yang Liu; Xing Fan; Saurabh Gupta; Saleh Soltan; Rakesh Chada; Pradeep Natarajan; Chenlei Guo; Gokhan Tur", + "authorids": "/j/jie-hao/; /y/yang-liu-icsi/; /x/xing-fan/; /s/saurabh-gupta/; /s/saleh-soltan/; /r/rakesh-chada/; /p/pradeep-natarajan/; /c/chenlei-guo/; /g/gokhan-tur/", + "bibtex": "@inproceedings{hao-etal-2022-cgf,\n title = \"{CGF}: Constrained Generation Framework for Query Rewriting in Conversational {AI}\",\n author = \"Hao, Jie and\n Liu, Yang and\n Fan, Xing and\n Gupta, Saurabh and\n Soltan, Saleh and\n Chada, Rakesh and\n Natarajan, Pradeep and\n Guo, Chenlei and\n Tur, Gokhan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.48/\",\n doi = \"10.18653/v1/2022.emnlp-industry.48\",\n pages = \"475--483\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.48.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.48/", + "pdf_size": 1242967, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12268827193619299194&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Linkedin; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "amazon.com;amazon.com;amazon.com;gmail.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;gmail.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;1;0;0;0;0;0", + 
"aff_unique_norm": "Amazon;LinkedIn Corporation", + "aff_unique_dep": "Alexa AI;", + "aff_unique_url": "https://www.amazon.com;https://www.linkedin.com", + "aff_unique_abbr": "Amazon;LinkedIn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.274", + "title": "CGoDial: A Large-Scale Benchmark for Chinese Goal-oriented Dialog Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Practical dialog systems need to deal with various knowledge sources, noisy user expressions, and the shortage of annotated data. To better solve the above problems, we propose CGoDial, a new challenging and comprehensive Chinese benchmark for multi-domain Goal-oriented Dialog evaluation. It contains 96,763 dialog sessions, and 574,949 dialog turns totally, covering three datasets with different knowledge sources: 1) a slot-based dialog (SBD) dataset with table-formed knowledge, 2) a flow-based dialog (FBD) dataset with tree-formed knowledge, and a retrieval-based dialog (RBD) dataset with candidate-formed knowledge. To bridge the gap between academic benchmarks and spoken dialog scenarios, we either collect data from real conversations or add spoken features to existing datasets via crowd-sourcing. 
The proposed experimental settings include the combinations of training with either the entire training set or a few-shot training set, and testing with either the standard test set or a hard test subset, which can assess model capabilities in terms of general prediction, fast adaptability and reliable robustness.", + "author": "Yinpei Dai; Wanwei He; Bowen Li; Yuchuan Wu; Zheng Cao; Zhongqi An; Jian Sun; Yongbin Li", + "authorids": "/y/yinpei-dai/; /w/wanwei-he/; /b/bowen-li/; /y/yuchuan-wu/; /z/zheng-cao/; /z/zhongqi-an/; /j/jian-sun/; /y/yongbin-li/", + "bibtex": "@inproceedings{dai-etal-2022-cgodial,\n title = \"{CG}o{D}ial: A Large-Scale Benchmark for {C}hinese Goal-oriented Dialog Evaluation\",\n author = \"Dai, Yinpei and\n He, Wanwei and\n Li, Bowen and\n Wu, Yuchuan and\n Cao, Zheng and\n An, Zhongqi and\n Sun, Jian and\n Li, Yongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.274/\",\n doi = \"10.18653/v1/2022.emnlp-main.274\",\n pages = \"4097--4111\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.274.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.274/", + "pdf_size": 883882, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1093090326573232760&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China; Alibaba Group, Beijing, China", + "aff_domain": 
"alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ", + "github": "https://github.com/AlibabaResearch/DAMO-ConvAI/cgodial", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.540", + "title": "CHIA: CHoosing Instances to Annotate for Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural machine translation (MT) systems have been shown to perform poorly on low-resource language pairs, for which large-scale parallel data is unavailable. Making the data annotation process faster and cheaper is therefore important to ensure equitable access to MT systems. To make optimal use of a limited annotation budget, we present CHIA (choosing instances to annotate), a method for selecting instances to annotate for machine translation. Using an existing multi-way parallel dataset of high-resource languages, we first identify instances, based on model training dynamics, that are most informative for training MT models for high-resource languages. We find that there are cross-lingual commonalities in instances that are useful for MT model training, which we use to identify instances that will be useful to train models on a new target language. 
Evaluating on 20 languages from two corpora, we show that training on instances selected using our method provides an average performance improvement of 1.59 BLEU over training on randomly selected instances of the same size.", + "author": "Rajat Bhatnagar; Ananya Ganesh; Katharina Kann", + "authorids": "/r/rajat-bhatnagar/; /a/ananya-ganesh/; /k/katharina-von-der-wense/", + "bibtex": "@inproceedings{bhatnagar-etal-2022-chia,\n title = \"{CHIA}: {CH}oosing Instances to Annotate for Machine Translation\",\n author = \"Bhatnagar, Rajat and\n Ganesh, Ananya and\n Kann, Katharina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.540/\",\n doi = \"10.18653/v1/2022.findings-emnlp.540\",\n pages = \"7299--7315\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.540.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.540/", + "pdf_size": 833795, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13278346114695572785&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Colorado Boulder; University of Colorado Boulder; University of Colorado Boulder", + "aff_domain": "colorado.edu;colorado.edu;colorado.edu", + "email": "colorado.edu;colorado.edu;colorado.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Colorado", + "aff_unique_dep": "", + "aff_unique_url": "https://www.colorado.edu", + "aff_unique_abbr": "CU Boulder", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Boulder", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.707", + 
"title": "CISLR: Corpus for Indian Sign Language Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Indian Sign Language, though used by a diverse community, still lacks well-annotated resources for developing systems that would enable sign language processing. In recent years researchers have actively worked for sign languages like American Sign Languages, however, Indian Sign language is still far from data-driven tasks like machine translation. To address this gap, in this paper, we introduce a new dataset CISLR (Corpus for Indian Sign Language Recognition) for word-level recognition in Indian Sign Language using videos. The corpus has a large vocabulary of around 4700 words covering different topics and domains. Further, we propose a baseline model for word recognition from sign language videos. To handle the low resource problem in the Indian Sign Language, the proposed model consists of a prototype-based one-shot learner that leverages resource rich American Sign Language to learn generalized features for improving predictions in Indian Sign Language. 
Our experiments show that gesture features learned in another sign language can help perform one-shot predictions in CISLR.", + "author": "Abhinav Joshi; Ashwani Bhat; Pradeep S; Priya Gole; Shashwat Gupta; Shreyansh Agarwal; Ashutosh Modi", + "authorids": "/a/abhinav-joshi/; /a/ashwani-bhat/; /p/pradeep-s/; /p/priya-gole/; /s/shashwat-gupta/; /s/shreyansh-agarwal/; /a/ashutosh-modi/", + "bibtex": "@inproceedings{joshi-etal-2022-cislr,\n title = \"{CISLR}: Corpus for {I}ndian {S}ign {L}anguage Recognition\",\n author = \"Joshi, Abhinav and\n Bhat, Ashwani and\n S, Pradeep and\n Gole, Priya and\n Gupta, Shashwat and\n Agarwal, Shreyansh and\n Modi, Ashutosh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.707/\",\n doi = \"10.18653/v1/2022.emnlp-main.707\",\n pages = \"10357--10366\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.707.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.707/", + "pdf_size": 3939961, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7542620826838729642&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K); Indian Institute of Technology Kanpur (IIT-K)", + "aff_domain": "cse.iitk.ac.in;gmail.com;iitk.ac.in;iitk.ac.in;iitk.ac.in;iitk.ac.in;cse.iitk.ac.in", + "email": "cse.iitk.ac.in;gmail.com;iitk.ac.in;iitk.ac.in;iitk.ac.in;iitk.ac.in;cse.iitk.ac.in", + "github": "", + 
"project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Indian Institute of Technology Kanpur", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitk.ac.in", + "aff_unique_abbr": "IIT-K", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Kanpur", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.findings-emnlp.30", + "title": "CLLE: A Benchmark for Continual Language Learning Evaluation in Multilingual Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Continual Language Learning (CLL) in multilingual translation is inevitable when new languages are required to be translated. Due to the lack of unified and generalized benchmarks, the evaluation of existing methods is greatly influenced by experimental design which usually has a big gap from the industrial demands. In this work, we propose the first Continual Language Learning Evaluation benchmark CLLE in multilingual translation. CLLE consists of a Chinese-centric corpus \u2014 CN-25 and two CLL tasks \u2014 the close-distance language continual learning task and the language family continual learning task designed for real and disparate demands. Different from existing translation benchmarks, CLLE considers several restrictions for CLL, including domain distribution alignment, content overlap, language diversity, and the balance of corpus. Furthermore, we propose a novel framework COMETA based on Constrained Optimization and META-learning to alleviate catastrophic forgetting and dependency on history training data by using a meta-model to retain the important parameters for old languages. Our experiments prove that CLLE is a challenging CLL benchmark and that our proposed method is effective when compared with other strong baselines. 
Due to the construction of the corpus, the task designing and the evaluation method are independent of the centric language, we also construct and release the English-centric corpus EN-25 to facilitate academic research.", + "author": "Han Zhang; Sheng Zhang; Yang Xiang; Bin Liang; Jinsong Su; Zhongjian Miao; Hui Wang; Ruifeng Xu", + "authorids": "/h/han-zhang/; /s/sheng-zhang/; /y/yang-xiang/; /b/bin-liang/; /j/jinsong-su/; /z/zhongjian-miao/; /h/hui-wang/; /r/ruifeng-xu/", + "bibtex": "https://aclanthology.org/2022.findings-emnlp.30.bib", + "pdf": "https://aclanthology.org/2022.findings-emnlp.30.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.30/", + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4969186900825570828&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.628", + "title": "CN-AutoMIC: Distilling Chinese Commonsense Knowledge from Pretrained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Commonsense knowledge graphs (CKGs) are increasingly applied in various natural language processing tasks. However, most existing CKGs are limited to English, which hinders related research in non-English languages. Meanwhile, directly generating commonsense knowledge from pretrained language models has recently received attention, yet it has not been explored in non-English languages. In this paper, we propose a large-scale Chinese CKG generated from multilingual PLMs, named as **CN-AutoMIC**, aiming to fill the research gap of non-English CKGs. To improve the efficiency, we propose generate-by-category strategy to reduce invalid generation. To ensure the filtering quality, we develop cascaded filters to discard low-quality results. 
To further increase the diversity and density, we introduce a bootstrapping iteration process to reuse generated results. Finally, we conduct detailed analyses on CN-AutoMIC from different aspects. Empirical results show the proposed CKG has high quality and diversity, surpassing the direct translation version of similar English CKGs. We also find some interesting deficiency patterns and differences between relations, which reveal pending problems in commonsense knowledge generation. We share the resources and related models for further study.", + "author": "Chenhao Wang; Jiachun Li; Yubo Chen; Kang Liu; Jun Zhao", + "authorids": "/c/chenhao-wang/; /j/jiachun-li/; /y/yubo-chen/; /k/kang-liu/; /j/jun-zhao/", + "bibtex": "@inproceedings{wang-etal-2022-cn,\n title = \"{CN}-{A}uto{MIC}: Distilling {C}hinese Commonsense Knowledge from Pretrained Language Models\",\n author = \"Wang, Chenhao and\n Li, Jiachun and\n Chen, Yubo and\n Liu, Kang and\n Zhao, Jun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.628/\",\n doi = \"10.18653/v1/2022.emnlp-main.628\",\n pages = \"9253--9265\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.628.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.628/", + "pdf_size": 869174, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17212167260217752023&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 0, + "aff": "National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, 
Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China+Beijing Academy of Artificial Intelligence, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "http://github.com/wchrepo/cnautomic/", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;0+1+2;0+1", + "aff_unique_norm": "National Laboratory of Pattern Recognition;University of Chinese Academy of Sciences;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;", + "aff_unique_url": ";http://www.ucas.ac.cn;https://www.baaic.cn", + "aff_unique_abbr": ";UCAS;BAAI", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.95", + "title": "COCO-DR: Combating the Distribution Shift in Zero-Shot Dense Retrieval with Contrastive and Distributionally Robust Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present a new zero-shot dense retrieval (ZeroDR) method, COCO-DR, to improve the generalization ability of dense retrieval by combating the distribution shifts between source training tasks and target 
scenarios. To mitigate the impact of document differences, COCO-DR continues pretraining the language model on the target corpora to adapt the model to target distributions via COtinuous COtrastive learning. To prepare for unseen target queries, COCO-DR leverages implicit Distributionally Robust Optimization (iDRO) to reweight samples from different source query clusters for improving model robustness over rare queries during fine-tuning. COCO-DR achieves superior average performance on BEIR, the zero-shot retrieval benchmark. At BERT_Base scale, COCO-DR Base outperforms other ZeroDR models with 60x larger size. At BERT_Large scale, COCO-DR Large outperforms the giant GPT-3 embedding model which has 500x more parameters. Our analysis shows the correlation between COCO-DR\u2019s effectiveness in combating distribution shifts and improving zero-shot accuracy. Our code and model can be found at https://github.com/OpenMatch/COCO-DR.", + "author": "Yue Yu; Chenyan Xiong; Si Sun; Chao Zhang; Arnold Overwijk", + "authorids": "/y/yue-yu/; /c/chenyan-xiong/; /s/si-sun/; /c/chao-zhang-tu/; /a/arnold-overwijk/", + "bibtex": "@inproceedings{yu-etal-2022-coco,\n title = \"{COCO}-{DR}: Combating the Distribution Shift in Zero-Shot Dense Retrieval with Contrastive and Distributionally Robust Learning\",\n author = \"Yu, Yue and\n Xiong, Chenyan and\n Sun, Si and\n Zhang, Chao and\n Overwijk, Arnold\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.95/\",\n doi = \"10.18653/v1/2022.emnlp-main.95\",\n pages = \"1462--1479\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.95.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.95/", + "pdf_size": 
662467, + "gs_citation": 62, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6103679454353278503&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Georgia Institute of Technology; Microsoft; Tsinghua University; Georgia Institute of Technology; Microsoft", + "aff_domain": "gatech.edu;microsoft.com;mails.tsinghua.edu.cn;gatech.edu;microsoft.com", + "email": "gatech.edu;microsoft.com;mails.tsinghua.edu.cn;gatech.edu;microsoft.com", + "github": "https://github.com/OpenMatch/COCO-DR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;1", + "aff_unique_norm": "Georgia Institute of Technology;Microsoft Corporation;Tsinghua University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.gatech.edu;https://www.microsoft.com;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "Georgia Tech;Microsoft;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.727", + "title": "CODER: An efficient framework for improving retrieval through COntextual Document Embedding Reranking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Contrastive learning has been the dominant approach to training dense retrieval models. In this work, we investigate the impact of ranking context - an often overlooked aspect of learning dense retrieval models. In particular, we examine the effect of its constituent parts: jointly scoring a large number of negatives per query, using retrieved (query-specific) instead of random negatives, and a fully list-wise loss.To incorporate these factors into training, we introduce Contextual Document Embedding Reranking (CODER), a highly efficient retrieval framework. When reranking, it incurs only a negligible computational overhead on top of a first-stage method at run time (approx. 
5 ms delay per query), allowing it to be easily combined with any state-of-the-art dual encoder method. Models trained through CODER can also be used as stand-alone retrievers.Evaluating CODER in a large set of experiments on the MS MARCO and TripClick collections, we show that the contextual reranking of precomputed document embeddings leads to a significant improvement in retrieval performance. This improvement becomes even more pronounced when more relevance information per query is available, shown in the TripClick collection, where we establish new state-of-the-art results by a large margin.", + "author": "George Zerveas; Navid Rekabsaz; Daniel Cohen; Carsten Eickhoff", + "authorids": "/g/george-zerveas/; /n/navid-rekabsaz/; /d/daniel-cohen/; /c/carsten-eickhoff/", + "bibtex": "@inproceedings{zerveas-etal-2022-coder,\n title = \"{CODER}: An efficient framework for improving retrieval through {CO}ntextual Document Embedding Reranking\",\n author = \"Zerveas, George and\n Rekabsaz, Navid and\n Cohen, Daniel and\n Eickhoff, Carsten\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.727/\",\n doi = \"10.18653/v1/2022.emnlp-main.727\",\n pages = \"10626--10644\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.727.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.727/", + "pdf_size": 977312, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=212197935531490394&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "AI Lab, Brown University, USA; Johannes Kepler University Linz, LIT AI Lab, Austria; AI Lab, Brown University, USA + Dataminr, USA; AI Lab, Brown University, USA", + 
"aff_domain": "brown.edu;jku.at;dataminr.com;brown.edu", + "email": "brown.edu;jku.at;dataminr.com;brown.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0+2;0", + "aff_unique_norm": "Brown University;Johannes Kepler University Linz;Dataminr", + "aff_unique_dep": "AI Lab;LIT AI Lab;", + "aff_unique_url": "https://www.brown.edu;https://www.jku.at;https://www.dataminr.com", + "aff_unique_abbr": "Brown;JKU;", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Linz", + "aff_country_unique_index": "0;1;0+0;0", + "aff_country_unique": "United States;Austria" + }, + { + "id": "2022.emnlp-main.796", + "title": "COLD: A Benchmark for Chinese Offensive Language Detection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Offensive language detection is increasingly crucial for maintaining a civilized social media platform and deploying pre-trained language models. However, this task in Chinese is still under exploration due to the scarcity of reliable datasets. To this end, we propose a benchmark \u2013COLD for Chinese offensive language analysis, including a Chinese Offensive Language Dataset \u2013COLDATASET and a baseline detector \u2013COLDETECTOR which is trained on the dataset. We show that the COLD benchmark contributes to Chinese offensive language detection which is challenging for existing resources. We then deploy the COLDETECTOR and conduct detailed analyses on popular Chinese pre-trained language models. We first analyze the offensiveness of existing generative models and show that these models inevitably expose varying degrees of offensive issues. 
Furthermore, we investigate the factors that influence the offensive generations, and we find that anti-bias contents and keywords referring to certain groups or revealing negative attitudes trigger offensive outputs easier.", + "author": "Jiawen Deng; Jingyan Zhou; Hao Sun; Chujie Zheng; Fei Mi; Helen Meng; Minlie Huang", + "authorids": "/j/jiawen-deng/; /j/jingyan-zhou/; /h/hao-sun/; /c/chujie-zheng/; /f/fei-mi/; /h/helen-meng/; /m/minlie-huang/", + "bibtex": "@inproceedings{deng-etal-2022-cold,\n title = \"{COLD}: A Benchmark for {C}hinese Offensive Language Detection\",\n author = \"Deng, Jiawen and\n Zhou, Jingyan and\n Sun, Hao and\n Zheng, Chujie and\n Mi, Fei and\n Meng, Helen and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.796/\",\n doi = \"10.18653/v1/2022.emnlp-main.796\",\n pages = \"11580--11599\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.796.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.796/", + "pdf_size": 3652312, + "gs_citation": 102, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9086355039997387027&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/thu-coai/COLDataset", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.212", + "title": "COM-MRC: A COntext-Masked Machine Reading Comprehension Framework for Aspect Sentiment Triplet Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect Sentiment Triplet Extraction (ASTE) aims to extract sentiment triplets from sentences, which was recently formalized as an 
effective machine reading comprehension (MRC) based framework. However, when facing multiple aspect terms, the MRC-based methods could fail due to the interference from other aspect terms. In this paper, we propose a novel COntext-Masked MRC (COM-MRC) framework for ASTE. Our COM-MRC framework comprises three closely-related components: a context augmentation strategy, a discriminative model, and an inference method. Specifically, a context augmentation strategy is designed by enumerating all masked contexts for each aspect term. The discriminative model comprises four modules, i.e., aspect and opinion extraction modules, sentiment classification and aspect detection modules. In addition, a two-stage inference method first extracts all aspects and then identifies their opinions and sentiment through iteratively masking the aspects. Extensive experimental results on benchmark datasets show the effectiveness of our proposed COM-MRC framework, which outperforms state-of-the-art methods consistently.", + "author": "Zepeng Zhai; Hao Chen; Fangxiang Feng; Ruifan Li; Xiaojie Wang", + "authorids": "/z/zepeng-zhai/; /h/hao-chen/; /f/fangxiang-feng/; /r/ruifan-li/; /x/xiaojie-wang/", + "bibtex": "@inproceedings{zhai-etal-2022-com,\n title = \"{COM}-{MRC}: A {CO}ntext-Masked Machine Reading Comprehension Framework for Aspect Sentiment Triplet Extraction\",\n author = \"Zhai, Zepeng and\n Chen, Hao and\n Feng, Fangxiang and\n Li, Ruifan and\n Wang, Xiaojie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.212/\",\n doi = \"10.18653/v1/2022.emnlp-main.212\",\n pages = \"3230--3241\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.212.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.212/", + "pdf_size": 1873015, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17985205871471410120&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China+Engineering Research Center of Information Networks, Ministry of Education, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China+Engineering Research Center of Information Networks, Ministry of Education, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China+Engineering Research Center of Information Networks, Ministry of Education, China", + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "https://github.com/zzp-seeker/COM-MRC", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;0+1;0+1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Engineering Research Center of Information Networks", + "aff_unique_dep": "School of Artificial Intelligence;Ministry of Education", + "aff_unique_url": "http://www.bupt.edu.cn/;", + "aff_unique_abbr": "BUPT;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.348", + "title": "COMET-QE and Active Learning for Low-Resource Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Active learning aims to deliver maximum benefit when resources are scarce. 
We use COMET-QE, a reference-free evaluation metric, to select sentences for low-resource neural machine translation. Using Swahili, Kinyarwanda and Spanish for our experiments, we show that COMET-QE significantly outperforms two variants of Round Trip Translation Likelihood (RTTL) and random sentence selection by up to 5 BLEU points for 20k sentences selected by Active Learning on a 30k baseline. This suggests that COMET-QE is a powerful tool for sentence selection in the very low-resource limit.", + "author": "Everlyn Asiko Chimoto; Bruce A. Bassett", + "authorids": "/e/everlyn-asiko-chimoto/; /b/bruce-a-bassett/", + "bibtex": "@inproceedings{chimoto-bassett-2022-comet,\n title = \"{COMET}-{QE} and Active Learning for Low-Resource Machine Translation\",\n author = \"Chimoto, Everlyn Asiko and\n Bassett, Bruce A.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.348/\",\n doi = \"10.18653/v1/2022.findings-emnlp.348\",\n pages = \"4735--4740\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.348.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.348/", + "pdf_size": 312194, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12538796355855355079&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "University of Cape Town, South Africa + African Institute for Mathematical Sciences; University of Cape Town, South Africa + African Institute for Mathematical Sciences, South Africa + South African Astronomical Observatory", + "aff_domain": "aims.ac.za;gmail.com", + "email": "aims.ac.za;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1+2", + 
"aff_unique_norm": "University of Cape Town;African Institute for Mathematical Sciences;South African Astronomical Observatory", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uct.ac.za;https://www.aims.ac.za;https://www.saastronomy.org", + "aff_unique_abbr": "UCT;AIMS;SAAO", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0+0", + "aff_country_unique": "South Africa" + }, + { + "id": "2022.emnlp-main.598", + "title": "CONDAQA: A Contrastive Reading Comprehension Dataset for Reasoning about Negation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The full power of human language-based communication cannot be realized without negation. All human languages have some form of negation. Despite this, negation remains a challenging phenomenon for current natural language understanding systems. To facilitate the future development of models that can process negation effectively, we present CONDAQA, the first English reading comprehension dataset which requires reasoning about the implications of negated statements in paragraphs. We collect paragraphs with diverse negation cues, then have crowdworkers ask questions about the implications of the negated statement in the passage. We also have workers make three kinds of edits to the passage\u2014paraphrasing the negated statement, changing the scope of the negation, and reversing the negation\u2014resulting in clusters of question-answer pairs that are difficult for models to answer with spurious shortcuts. CONDAQA features 14,182 question-answer pairs with over 200 unique negation cues and is challenging for current state-of-the-art models. The best performing model on CONDAQA (UnifiedQA-v2-3b) achieves only 42% on our consistency metric, well below human performance which is 81%. 
We release our dataset, along with fully-finetuned, few-shot, and zero-shot evaluations, to facilitate the development of future NLP methods that work on negated language.", + "author": "Abhilasha Ravichander; Matt Gardner; Ana Marasovic", + "authorids": "/a/abhilasha-ravichander/; /m/matt-gardner/; /a/ana-marasovic/", + "bibtex": "@inproceedings{ravichander-etal-2022-condaqa,\n title = \"{CONDAQA}: A Contrastive Reading Comprehension Dataset for Reasoning about Negation\",\n author = \"Ravichander, Abhilasha and\n Gardner, Matt and\n Marasovic, Ana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.598/\",\n doi = \"10.18653/v1/2022.emnlp-main.598\",\n pages = \"8729--8755\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.598.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.598/", + "pdf_size": 4488366, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=917196846802537411&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Carnegie Mellon University; Microsoft Semantic Machines; University of Utah + Allen Institute for AI", + "aff_domain": "cs.cmu.edu;microsoft.com;utah.edu", + "email": "cs.cmu.edu;microsoft.com;utah.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "Carnegie Mellon University;Microsoft;University of Utah;Allen Institute for AI", + "aff_unique_dep": ";Semantic Machines;;", + "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com;https://www.utah.edu;https://allenai.org", + "aff_unique_abbr": "CMU;Microsoft;Utah;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.679", + "title": "CONQRR: Conversational Query Rewriting for Retrieval with Reinforcement Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Compared to standard retrieval tasks, passage retrieval for conversational question answering (CQA) poses new challenges in understanding the current user question, as each question needs to be interpreted within the dialogue context. Moreover, it can be expensive to re-train well-established retrievers such as search engines that are originally developed for non-conversational queries. To facilitate their use, we develop a query rewriting model CONQRR that rewrites a conversational question in the context into a standalone question. It is trained with a novel reward function to directly optimize towards retrieval using reinforcement learning and can be adapted to any off-the-shelf retriever. CONQRR achieves state-of-the-art results on a recent open-domain CQA dataset containing conversations from three different sources, and is effective for two different off-the-shelf retrievers. 
Our extensive analysis also shows the robustness of CONQRR to out-of-domain dialogues as well as to zero query rewriting supervision.", + "author": "Zeqiu Wu; Yi Luan; Hannah Rashkin; David Reitter; Hannaneh Hajishirzi; Mari Ostendorf; Gaurav Singh Tomar", + "authorids": "/z/zeqiu-wu/; /y/yi-luan/; /h/hannah-rashkin/; /d/david-reitter/; /h/hannaneh-hajishirzi/; /m/mari-ostendorf/; /g/gaurav-singh-tomar/", + "bibtex": "@inproceedings{wu-etal-2022-conqrr,\n title = \"{CONQRR}: Conversational Query Rewriting for Retrieval with Reinforcement Learning\",\n author = \"Wu, Zeqiu and\n Luan, Yi and\n Rashkin, Hannah and\n Reitter, David and\n Hajishirzi, Hannaneh and\n Ostendorf, Mari and\n Tomar, Gaurav Singh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.679/\",\n doi = \"10.18653/v1/2022.emnlp-main.679\",\n pages = \"10000--10014\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.679.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.679/", + "pdf_size": 1203561, + "gs_citation": 71, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17404364782451714295&as_sdt=5,36&sciodt=0,36&hl=en", + "gs_version_total": 4, + "aff": "University of Washington\u2666; Google Research\u2660; Allen Institute for AI\u2663; University of Washington\u2666; Google Research\u2660; University of Washington\u2666; Google Research\u2660", + "aff_domain": "uw.edu;google.com;google.com;google.com;uw.edu;uw.edu;google.com", + "email": "uw.edu;google.com;google.com;google.com;uw.edu;uw.edu;google.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0;1;0;1", + "aff_unique_norm": "University of Washington;Google;Allen 
Institute for AI", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.washington.edu;https://research.google;https://allenai.org", + "aff_unique_abbr": "UW;Google Research;AI2", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.517", + "title": "CONSISTENT: Open-Ended Question Generation From News Articles", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent work on question generation has largely focused on factoid questions such as who, what,where, when about basic facts. Generating open-ended why, how, what, etc. questions thatrequire long-form answers have proven more difficult. To facilitate the generation of openended questions, we propose CONSISTENT, a new end-to-end system for generating openended questions that are answerable from and faithful to the input text. Using news articles asa trustworthy foundation for experimentation, we demonstrate our model\u2019s strength over several baselines using both automatic and human based evaluations. We contribute an evaluationdataset of expert-generated open-ended questions. 
We discuss potential downstream applications for news media organizations.", + "author": "Tuhin Chakrabarty; Justin Lewis; Smaranda Muresan", + "authorids": "/t/tuhin-chakrabarty/; /j/justin-lewis/; /s/smaranda-muresan/", + "bibtex": "@inproceedings{chakrabarty-etal-2022-consistent,\n title = \"{CONSISTENT}: Open-Ended Question Generation From News Articles\",\n author = \"Chakrabarty, Tuhin and\n Lewis, Justin and\n Muresan, Smaranda\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.517/\",\n doi = \"10.18653/v1/2022.findings-emnlp.517\",\n pages = \"6954--6968\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.517.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.517/", + "pdf_size": 1422548, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9372647243706462509&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Columbia University + The New York Times R&D; The New York Times R&D; Department of Computer Science, Columbia University", + "aff_domain": "cs.columbia.edu;justintlewis.com;cs.columbia.edu", + "email": "cs.columbia.edu;justintlewis.com;cs.columbia.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": "Columbia University;The New York Times", + "aff_unique_dep": "Department of Computer Science;R&D", + "aff_unique_url": "https://www.columbia.edu;https://www.nytimes.com", + "aff_unique_abbr": "Columbia;NYT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.335", + 
"title": "COPEN: Probing Conceptual Knowledge in Pre-trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conceptual knowledge is fundamental to human cognition and knowledge bases. However, existing knowledge probing works only focus on evaluating factual knowledge of pre-trained language models (PLMs) and ignore conceptual knowledge. Since conceptual knowledge often appears as implicit commonsense behind texts, designing probes for conceptual knowledge is hard. Inspired by knowledge representation schemata, we comprehensively evaluate conceptual knowledge of PLMs by designing three tasks to probe whether PLMs organize entities by conceptual similarities, learn conceptual properties, and conceptualize entities in contexts, respectively. For the tasks, we collect and annotate 24k data instances covering 393 concepts, which is COPEN, a COnceptual knowledge Probing bENchmark. Extensive experiments on different sizes and types of PLMs show that existing PLMs systematically lack conceptual knowledge and suffer from various spurious correlations. We believe this is a critical bottleneck for realizing human-like cognition in PLMs. 
COPEN and our codes are publicly released at https://github.com/THU-KEG/COPEN.", + "author": "Hao Peng; Xiaozhi Wang; Shengding Hu; Hailong Jin; Lei Hou; Juanzi Li; Zhiyuan Liu; Qun Liu", + "authorids": "/h/hao-peng/; /x/xiaozhi-wang/; /s/shengding-hu/; /h/hailong-jin/; /l/lei-hou/; /j/juanzi-li/; /z/zhiyuan-liu/; /q/qun-liu/", + "bibtex": "@inproceedings{peng-etal-2022-copen,\n title = \"{COPEN}: Probing Conceptual Knowledge in Pre-trained Language Models\",\n author = \"Peng, Hao and\n Wang, Xiaozhi and\n Hu, Shengding and\n Jin, Hailong and\n Hou, Lei and\n Li, Juanzi and\n Liu, Zhiyuan and\n Liu, Qun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.335/\",\n doi = \"10.18653/v1/2022.emnlp-main.335\",\n pages = \"5015--5035\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.335.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.335/", + "pdf_size": 1963237, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1868207637292644001&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer 
Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing, 100084, China; Huawei Noah\u2019s Ark Lab", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ", + "github": "https://github.com/THU-KEG/COPEN", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1;0+1;2", + "aff_unique_norm": "BNRist;Tsinghua University;Huawei", + "aff_unique_dep": "Department of Computer Science and Technology;Institute for Artificial Intelligence;Noah\u2019s Ark Lab", + "aff_unique_url": ";https://www.tsinghua.edu.cn;https://www.huawei.com", + "aff_unique_abbr": ";Tsinghua;Huawei", + "aff_campus_unique_index": "1;1;1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "1;1;1;1;1;1;1;1", + "aff_country_unique": ";China" + }, + { + "id": "2022.findings-emnlp.216", + "title": "CORE: A Retrieve-then-Edit Framework for Counterfactual Data Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Counterfactual data augmentation (CDA) \u2013 i.e., adding minimally perturbed inputs during training \u2013 helps reduce model reliance on spurious correlations and improves generalization to out-of-distribution (OOD) data. Prior work on generating counterfactuals only considered restricted classes of perturbations, limiting their effectiveness. We present Counterfactual Generation via Retrieval and Editing (CORE), a retrieval-augmented generation framework for creating diverse counterfactual perturbations for CDA. 
For each training example, CORE first performs a dense retrieval over a task-related unlabeled text corpus using a learned bi-encoder and extracts relevant counterfactual excerpts. CORE then incorporates these into prompts to a large language model with few-shot learning capabilities, for counterfactual editing. Conditioning language model edits on naturally occurring data results in more diverse perturbations. Experiments on natural language inference and sentiment analysis benchmarks show that CORE counterfactuals are more effective at improving generalization to OOD data compared to other DA approaches. We also show that the CORE retrieval framework can be used to encourage diversity in manually authored perturbations.", + "author": "Tanay Dixit; Bhargavi Paranjape; Hannaneh Hajishirzi; Luke Zettlemoyer", + "authorids": "/t/tanay-dixit/; /b/bhargavi-paranjape/; /h/hannaneh-hajishirzi/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{dixit-etal-2022-core,\n title = \"{CORE}: A Retrieve-then-Edit Framework for Counterfactual Data Generation\",\n author = \"Dixit, Tanay and\n Paranjape, Bhargavi and\n Hajishirzi, Hannaneh and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.216/\",\n doi = \"10.18653/v1/2022.findings-emnlp.216\",\n pages = \"2964--2984\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.216.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.216/", + "pdf_size": 1813708, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14700244807847198886&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Indian Institute of Technology, Madras; Paul G. 
Allen School of Computer Science & Engineering, University of Washington + Allen Institute of Artificial Intelligence, Seattle + Meta AI; Paul G. Allen School of Computer Science & Engineering, University of Washington + Allen Institute of Artificial Intelligence, Seattle + Meta AI; Paul G. Allen School of Computer Science & Engineering, University of Washington + Meta AI", + "aff_domain": "smail.iitm.ac.in;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "email": "smail.iitm.ac.in;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "github": "https://github.com/tanay2001/CORE", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2+3;1+2+3;1+3", + "aff_unique_norm": "Indian Institute of Technology Madras;University of Washington;Allen Institute for Artificial Intelligence;Meta Platforms, Inc.", + "aff_unique_dep": ";Paul G. Allen School of Computer Science & Engineering;Artificial Intelligence;Meta AI", + "aff_unique_url": "https://www.iitm.ac.in;https://www.washington.edu;https://allenai.org;https://meta.com", + "aff_unique_abbr": "IIT Madras;UW;AI2;Meta", + "aff_campus_unique_index": "0;1+1;1+1;1", + "aff_campus_unique": "Madras;Seattle;", + "aff_country_unique_index": "0;1+1+1;1+1+1;1+1", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.findings-emnlp.524", + "title": "CORT: A New Baseline for Comparative Opinion Classification by Dual Prompts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Comparative opinion is a common linguistic phenomenon. The opinion is expressed by comparing multiple targets on a shared aspect, e.g., \u201ccamera A is better than camera B in picture quality\u201d. Among the various subtasks in opinion mining, comparative opinion classification is relatively less studied. Current solutions use rules or classifiers to identify opinions, i.e., better, worse, or same, through feature engineering. 
Because the features are directly derived from the input sentence, these solutions are sensitive to the order of the targets mentioned in the sentence. For example, \u201ccamera A is better than camera B\u201d means the same as \u201ccamera B is worse than camera A\u201d; but the features of these two sentences are completely different. In this paper, we approach comparative opinion classification through prompt learning, taking the advantage of embedded knowledge in pre-trained language model. We design a twin framework with dual prompts, named CORT. This extremely simple model delivers state-of-the-art and robust performance on all benchmark datasets for comparative opinion classification. We believe CORT well serves as a new baseline for comparative opinion classification.", + "author": "Yequan Wang; Hengran Zhang; Aixin Sun; Xuying Meng", + "authorids": "/y/yequan-wang/; /h/hengran-zhang/; /a/aixin-sun/; /x/xuying-meng/", + "bibtex": "@inproceedings{wang-etal-2022-cort,\n title = \"{CORT}: A New Baseline for Comparative Opinion Classification by Dual Prompts\",\n author = \"Wang, Yequan and\n Zhang, Hengran and\n Sun, Aixin and\n Meng, Xuying\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.524/\",\n doi = \"10.18653/v1/2022.findings-emnlp.524\",\n pages = \"7064--7075\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.524.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.524/", + "pdf_size": 378856, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12362817520544952761&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Beijing Academy of Artificial Intelligence, Beijing, China; 
Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; School of Computer Science and Engineering, Nanyang Technological University, Singapore; Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China", + "aff_domain": "gmail.com;ntu.edu.sg;ict.ac.cn;ict.ac.cn", + "email": "gmail.com;ntu.edu.sg;ict.ac.cn;ict.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;1", + "aff_unique_norm": "Beijing Academy of Artificial Intelligence;Chinese Academy of Sciences;University of Chinese Academy of Sciences;Nanyang Technological University", + "aff_unique_dep": ";Institute of Computing Technology;;School of Computer Science and Engineering", + "aff_unique_url": "https://www.baaic.cn;http://www.ict.ac.cn;http://www.ucas.ac.cn;https://www.ntu.edu.sg", + "aff_unique_abbr": "BAAI;CAS;UCAS;NTU", + "aff_campus_unique_index": "0;0+0;1;0", + "aff_campus_unique": "Beijing;Singapore", + "aff_country_unique_index": "0;0+0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.112", + "title": "COST-EFF: Collaborative Optimization of Spatial and Temporal Efficiency with Slenderized Multi-exit Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based pre-trained language models (PLMs) mostly suffer from excessive overhead despite their advanced capacity. For resource-constrained devices, there is an urgent need for a spatially and temporally efficient model which retains the major capacity of PLMs. However, existing statically compressed models are unaware of the diverse complexities between input instances, potentially resulting in redundancy and inadequacy for simple and complex inputs. Also, miniature models with early exiting encounter challenges in the trade-off between making predictions and serving the deeper layers. 
Motivated by such considerations, we propose a collaborative optimization for PLMs that integrates static model compression and dynamic inference acceleration. Specifically, the PLM is slenderized in width while the depth remains intact, complementing layer-wise early exiting to speed up inference dynamically. To address the trade-off of early exiting, we propose a joint training approach that calibrates slenderization and preserves contributive structures to each exit instead of only the final layer. Experiments are conducted on GLUE benchmark and the results verify the Pareto optimality of our approach at high compression and acceleration rate with 1/8 parameters and 1/19 FLOPs of BERT.", + "author": "Bowen Shen; Zheng Lin; Yuanxin Liu; Zhengxiao Liu; Lei Wang; Weiping Wang", + "authorids": "/b/bowen-shen/; /z/zheng-lin/; /y/yuanxin-liu/; /z/zhengxiao-liu/; /l/lei-wang/; /w/weiping-wang/", + "bibtex": "@inproceedings{shen-etal-2022-cost,\n title = \"{COST}-{EFF}: Collaborative Optimization of Spatial and Temporal Efficiency with Slenderized Multi-exit Language Models\",\n author = \"Shen, Bowen and\n Lin, Zheng and\n Liu, Yuanxin and\n Liu, Zhengxiao and\n Wang, Lei and\n Wang, Weiping\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.112/\",\n doi = \"10.18653/v1/2022.emnlp-main.112\",\n pages = \"1719--1730\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.112.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.112/", + "pdf_size": 607702, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14246026446941228580&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "Institute of 
Information Engineering, Chinese Academy of Sciences + School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences + School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences + MOE Key Laboratory of Computational Linguistics, Peking University; Institute of Information Engineering, Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences + School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences", + "aff_domain": "iie.ac.cn;iie.ac.cn;stu.pku.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;stu.pku.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+2;0;0+1;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Peking University", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;MOE Key Laboratory of Computational Linguistics", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;http://www.pku.edu.cn", + "aff_unique_abbr": "CAS;UCAS;PKU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.224", + "title": "CPL: Counterfactual Prompt Learning for Vision and Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt tuning is a new few-shot transfer learning technique that only tunes the learnable prompt for pre-trained vision and language models such as CLIP. 
However, existing prompt tuning methods tend to learn spurious or entangled representations, which leads to poor generalization to unseen concepts.Towards non-spurious and efficient prompt learning from limited examples, this paper presents a novel Counterfactual Prompt Learning (CPL) method for vision and language models, which simultaneously employs counterfactual generation and contrastive learning in a joint optimization framework.Particularly, CPL constructs counterfactual by identifying minimal non-spurious feature change between semantically-similar positive and negative samples that causes concept change, and learns more generalizable prompt representation from both factual and counterfactual examples via contrastive learning. Extensive experiments demonstrate that CPL can obtain superior few-shot performance on different vision and language tasks than previous prompt tuning methods on CLIP. On image classification, we achieve 3.55% average relative improvement on unseen classes across seven datasets; on image-text retrieval and visual question answering, we gain up to 4.09% and 25.08% relative improvements across three few-shot scenarios on unseen test sets respectively.", + "author": "Xuehai He; Diji Yang; Weixi Feng; Tsu-Jui Fu; Arjun Akula; Varun Jampani; Pradyumna Narayana; Sugato Basu; William Yang Wang; Xin Wang", + "authorids": "/x/xuehai-he/; /d/diji-yang/; /w/weixi-feng/; /t/tsu-jui-fu/; /a/arjun-akula/; /v/varun-jampani/; /p/pradyumna-narayana/; /s/sugato-basu/; /w/william-yang-wang/; /x/xin-wang/", + "bibtex": "@inproceedings{he-etal-2022-cpl,\n title = \"{CPL}: Counterfactual Prompt Learning for Vision and Language Models\",\n author = \"He, Xuehai and\n Yang, Diji and\n Feng, Weixi and\n Fu, Tsu-Jui and\n Akula, Arjun and\n Jampani, Varun and\n Narayana, Pradyumna and\n Basu, Sugato and\n Wang, William Yang and\n Wang, Xin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 
Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.224/\",\n doi = \"10.18653/v1/2022.emnlp-main.224\",\n pages = \"3407--3418\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.224.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.224/", + "pdf_size": 3505047, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10432693475831437077&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "UC Santa Cruz; UC Santa Cruz; UC Santa Barbara; UC Santa Barbara; Google; Google; Google; Google; UC Santa Barbara; UC Santa Cruz", + "aff_domain": "ucsc.edu;ucsc.edu;ucsb.edu;ucsb.edu;google.com;google.com;google.com;google.com;ucsb.edu;ucsc.edu", + "email": "ucsc.edu;ucsc.edu;ucsb.edu;ucsb.edu;google.com;google.com;google.com;google.com;ucsb.edu;ucsc.edu", + "github": "https://github.com/eric-ai-lab/CPL", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;1;1;2;2;2;2;1;0", + "aff_unique_norm": "University of California, Santa Cruz;University of California, Santa Barbara;Google", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsc.edu;https://www.ucsb.edu;https://www.google.com", + "aff_unique_abbr": "UCSC;UCSB;Google", + "aff_campus_unique_index": "0;0;1;1;2;2;2;2;1;0", + "aff_campus_unique": "Santa Cruz;Santa Barbara;Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.150", + "title": "CQR-SQL: Conversational Question Reformulation Enhanced Context-Dependent Text-to-SQL Parsers", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Context-dependent text-to-SQL is the task of translating multi-turn questions into database-related SQL queries. 
Existing methods typically focus on making full use of history context or previously predicted SQL for currently SQL parsing, while neglecting to explicitly comprehend the schema and conversational dependency, such as co-reference, ellipsis and user focus change. In this paper, we propose CQR-SQL, which uses auxiliary Conversational Question Reformulation (CQR) learning to explicitly exploit schema and decouple contextual dependency for multi-turn SQL parsing. Specifically, we first present a schema enhanced recursive CQR method to produce domain-relevant self-contained questions. Secondly, we train CQR-SQL models to map the semantics of multi-turn questions and auxiliary self-contained questions into the same latent space through schema grounding consistency task and tree-structured SQL parsing consistency task, which enhances the abilities of SQL parsing by adequately contextual understanding. At the time of writing, our CQR-SQL achieves new state-of-the-art results on two context-dependent text-to-SQL benchmarks SParC and CoSQL.", + "author": "Dongling Xiao; LinZheng Chai; Qian-Wen Zhang; Zhao Yan; Zhoujun Li; Yunbo Cao", + "authorids": "/d/dongling-xiao/; /l/linzheng-chai/; /q/qian-wen-zhang/; /z/zhao-yan/; /z/zhoujun-li/; /y/yunbo-cao/", + "bibtex": "@inproceedings{xiao-etal-2022-cqr,\n title = \"{CQR}-{SQL}: Conversational Question Reformulation Enhanced Context-Dependent Text-to-{SQL} Parsers\",\n author = \"Xiao, Dongling and\n Chai, LinZheng and\n Zhang, Qian-Wen and\n Yan, Zhao and\n Li, Zhoujun and\n Cao, Yunbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.150/\",\n doi = \"10.18653/v1/2022.findings-emnlp.150\",\n pages = \"2055--2068\"\n}", 
+ "pdf": "https://aclanthology.org/2022.findings-emnlp.150.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.150/", + "pdf_size": 4083624, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1278326555437060906&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.670", + "title": "CRIPP-VQA: Counterfactual Reasoning about Implicit Physical Properties via Video Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Videos often capture objects, their visible properties, their motion, and the interactions between different objects. Objects also have physical properties such as mass, which the imaging pipeline is unable to directly capture. However, these properties can be estimated by utilizing cues from relative object motion and the dynamics introduced by collisions. In this paper, we introduce CRIPP-VQA, a new video question answering dataset for reasoning about the implicit physical properties of objects in a scene. CRIPP-VQA contains videos of objects in motion, annotated with questions that involve counterfactual reasoning about the effect of actions, questions about planning in order to reach a goal, and descriptive questions about visible properties of objects. The CRIPP-VQA test set enables evaluation under several out-of-distribution settings \u2013 videos with objects with masses, coefficients of friction, and initial velocities that are not observed in the training distribution. 
Our experiments reveal a surprising and significant performance gap in terms of answering questions about implicit properties (the focus of this paper) and explicit properties of objects (the focus of prior work).", + "author": "Maitreya Patel; Tejas Gokhale; Chitta Baral; Yezhou Yang", + "authorids": "/m/maitreya-patel/; /t/tejas-gokhale/; /c/chitta-baral/; /y/yezhou-yang/", + "bibtex": "@inproceedings{patel-etal-2022-cripp,\n title = \"{CRIPP}-{VQA}: Counterfactual Reasoning about Implicit Physical Properties via Video Question Answering\",\n author = \"Patel, Maitreya and\n Gokhale, Tejas and\n Baral, Chitta and\n Yang, Yezhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.670/\",\n doi = \"10.18653/v1/2022.emnlp-main.670\",\n pages = \"9856--9870\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.670.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.670/", + "pdf_size": 3690057, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3357996473820792244&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Arizona State University; Arizona State University; Arizona State University; Arizona State University", + "aff_domain": "asu.edu;asu.edu;asu.edu;asu.edu", + "email": "asu.edu;asu.edu;asu.edu;asu.edu", + "github": "", + "project": "https://maitreyapatel.com/CRIPP-VQA/", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.34", + "title": "CROP: Zero-shot Cross-lingual Named Entity Recognition with Multilingual Labeled Sequence Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Named entity recognition (NER) suffers from the scarcity of annotated training data, especially for low-resource languages without labeled data. Cross-lingual NER has been proposed to alleviate this issue by transferring knowledge from high-resource languages to low-resource languages via aligned cross-lingual representations or machine translation results. However, the performance of cross-lingual NER methods is severely affected by the unsatisfactory quality of translation or label projection. To address these problems, we propose a Cross-lingual Entity Projection framework (CROP) to enable zero-shot cross-lingual NER with the help of a multilingual labeled sequence translation model. Specifically, the target sequence is first translated into the source language and then tagged by a source NER model. We further adopt a labeled sequence translation model to project the tagged sequence back to the target language and label the target raw sentence. Ultimately, the whole pipeline is integrated into an end-to-end model by the way of self-training. 
Experimental results on two benchmarks demonstrate that our method substantially outperforms the previous strong baseline by a large margin of +3~7 F1 scores and achieves state-of-the-art performance.", + "author": "Jian Yang; Shaohan Huang; Shuming Ma; Yuwei Yin; Li Dong; Dongdong Zhang; Hongcheng Guo; Zhoujun Li; Furu Wei", + "authorids": "/j/jian-yang/; /s/shaohan-huang/; /s/shuming-ma/; /y/yuwei-yin/; /l/li-dong/; /d/dongdong-zhang/; /h/hongcheng-guo/; /z/zhoujun-li/; /f/furu-wei/", + "bibtex": "@inproceedings{yang-etal-2022-crop,\n title = \"{CROP}: Zero-shot Cross-lingual Named Entity Recognition with Multilingual Labeled Sequence Translation\",\n author = \"Yang, Jian and\n Huang, Shaohan and\n Ma, Shuming and\n Yin, Yuwei and\n Dong, Li and\n Zhang, Dongdong and\n Guo, Hongcheng and\n Li, Zhoujun and\n Wei, Furu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.34/\",\n doi = \"10.18653/v1/2022.findings-emnlp.34\",\n pages = \"486--496\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.34.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.34/", + "pdf_size": 3471329, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12557727126290074273&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "State Key Lab of Software Development Environment, Beihang University; Microsoft Research Asia; Microsoft Research Asia; The University of Hong Kong; Microsoft Research Asia; Microsoft Research Asia; State Key Lab of Software Development Environment, Beihang University; State Key Lab of Software Development Environment, Beihang University; Microsoft Research Asia", + "aff_domain": 
"buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;hku.hk", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;hku.hk", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;2;1;1;0;0;1", + "aff_unique_norm": "Beihang University;Microsoft Research;The University of Hong Kong", + "aff_unique_dep": "State Key Lab of Software Development Environment;Research;", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.hku.hk", + "aff_unique_abbr": "Beihang;MSR Asia;HKU", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.662", + "title": "CTL++: Evaluating Generalization on Never-Seen Compositional Patterns of Known Functions, and Compatibility of Neural Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Well-designed diagnostic tasks have played a key role in studying the failure of neural nets (NNs) to generalize systematically. Famous examples include SCAN and Compositional Table Lookup (CTL). Here we introduce CTL++, a new diagnostic dataset based on compositions of unary symbolic functions. While the original CTL is used to test length generalization or productivity, CTL++ is designed to test systematicity of NNs, that is, their capability to generalize to unseen compositions of known functions. CTL++ splits functions into groups and tests performance on group elements composed in a way not seen during training. We show that recent CTL-solving Transformer variants fail on CTL++. The simplicity of the task design allows for fine-grained control of task difficulty, as well as many insightful analyses. 
For example, we measure how much overlap between groups is needed by tested NNs for learning to compose. We also visualize how learned symbol representations in outputs of functions from different groups are compatible in case of success but not in case of failure. These results provide insights into failure cases reported on more complex compositions in the natural language domain. Our code is public.", + "author": "R\u00f3bert Csord\u00e1s; Kazuki Irie; Juergen Schmidhuber", + "authorids": "/r/robert-csordas/; /k/kazuki-irie/; /j/juergen-schmidhuber/", + "bibtex": "@inproceedings{csordas-etal-2022-ctl,\n title = \"{CTL}++: Evaluating Generalization on Never-Seen Compositional Patterns of Known Functions, and Compatibility of Neural Representations\",\n author = \"Csord{\\'a}s, R{\\'o}bert and\n Irie, Kazuki and\n Schmidhuber, Juergen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.662/\",\n doi = \"10.18653/v1/2022.emnlp-main.662\",\n pages = \"9758--9767\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.662.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.662/", + "pdf_size": 631032, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10884935910480391739&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "The Swiss AI Lab IDSIA, USI & SUPSI, Lugano, Switzerland; The Swiss AI Lab IDSIA, USI & SUPSI, Lugano, Switzerland; The Swiss AI Lab IDSIA, USI & SUPSI, Lugano, Switzerland + AI Initiative, KAUST, Thuwal, Saudi Arabia", + "aff_domain": "idsia.ch;idsia.ch;idsia.ch", + "email": "idsia.ch;idsia.ch;idsia.ch", + "github": "https://github.com/robertcsordas/ctlpp", + "project": 
"", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Swiss AI Lab IDSIA;King Abdullah University of Science and Technology", + "aff_unique_dep": "AI Lab;AI Initiative", + "aff_unique_url": "https://www.idsia.ch/;https://www.kaust.edu.sa", + "aff_unique_abbr": "IDSIA;KAUST", + "aff_campus_unique_index": "0;0;0+1", + "aff_campus_unique": "Lugano;Thuwal", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "Switzerland;Saudi Arabia" + }, + { + "id": "2022.emnlp-main.396", + "title": "CTRLsum: Towards Generic Controllable Text Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current summarization systems yield generic summaries that are disconnected from users\u2019 preferences and expectations. To address this limitation, we present CTRLsum, a generic framework to control generated summaries through a set of keywords. During training keywords are extracted automatically without requiring additional human annotations. At test time CTRLsum features a control function to map control signal to keywords; through engineering the control function, the same trained model is able to be applied to control summaries on various dimensions, while neither affecting the model training process nor the pretrained models. We additionally explore the combination of keywords and text prompts for more control tasks. Experiments demonstrate the effectiveness of CTRLsum on three domains of summarization datasets and five control tasks: (1) entity-centric and (2) length-controllable summarization, (3) contribution summarization on scientific papers, (4) invention purpose summarization on patent filings, and (5) question-guided summarization on news articles. 
Moreover, when used in a standard, unconstrained summarization setting, CTRLsum is comparable or better than strong pretrained systems.", + "author": "Junxian He; Wojciech Kryscinski; Bryan McCann; Nazneen Rajani; Caiming Xiong", + "authorids": "/j/junxian-he/; /w/wojciech-kryscinski/; /b/bryan-mccann/; /n/nazneen-rajani/; /c/caiming-xiong/", + "bibtex": "@inproceedings{he-etal-2022-ctrlsum,\n title = \"{CTRL}sum: Towards Generic Controllable Text Summarization\",\n author = \"He, Junxian and\n Kryscinski, Wojciech and\n McCann, Bryan and\n Rajani, Nazneen and\n Xiong, Caiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.396/\",\n doi = \"10.18653/v1/2022.emnlp-main.396\",\n pages = \"5879--5915\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.396.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.396/", + "pdf_size": 1084429, + "gs_citation": 161, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1429503522379812889&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Carnegie Mellon University+Salesforce Research; Salesforce Research; Salesforce Research; Salesforce Research; Salesforce Research", + "aff_domain": "cs.cmu.edu;salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "email": "cs.cmu.edu;salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "github": "https://github.com/salesforce/ctrl-sum", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;1", + "aff_unique_norm": "Carnegie Mellon University;Salesforce", + "aff_unique_dep": ";Salesforce Research", + "aff_unique_url": "https://www.cmu.edu;https://research.salesforce.com", + "aff_unique_abbr": 
"CMU;Salesforce", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.438", + "title": "Calibrating Factual Knowledge in Pretrained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Previous literature has proved that Pretrained Language Models (PLMs) can store factual knowledge. However, we find that facts stored in the PLMs are not always correct. It motivates us to explore a fundamental question: How do we calibrate factual knowledge in PLMs without re-training from scratch? In this work, we propose a simple and lightweight method CaliNet to achieve this goal. To be specific, we first detect whether PLMs can learn the right facts via a contrastive score between right and fake facts. If not, we then use a lightweight method to add and adapt new parameters to specific factual texts. Experiments on the knowledge probing task show the calibration effectiveness and efficiency. 
In addition, through closed-book question answering, we find that the calibrated PLM possesses knowledge generalization ability after finetuning. Beyond the calibration performance, we further investigate and visualize the knowledge calibration mechanism.", + "author": "Qingxiu Dong; Damai Dai; Yifan Song; Jingjing Xu; Zhifang Sui; Lei Li", + "authorids": "/q/qingxiu-dong/; /d/damai-dai/; /y/yifan-song/; /j/jingjing-xu/; /z/zhifang-sui/; /l/lei-li/", + "bibtex": "@inproceedings{dong-etal-2022-calibrating,\n title = \"Calibrating Factual Knowledge in Pretrained Language Models\",\n author = \"Dong, Qingxiu and\n Dai, Damai and\n Song, Yifan and\n Xu, Jingjing and\n Sui, Zhifang and\n Li, Lei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.438/\",\n doi = \"10.18653/v1/2022.findings-emnlp.438\",\n pages = \"5937--5947\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.438.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.438/", + "pdf_size": 680578, + "gs_citation": 118, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7024471273157541260&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; Shanghai AI Lab; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; University of California, Santa Barbara", + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;cs.ucsb.edu", + "email": 
"stu.pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;cs.ucsb.edu", + "github": "https://github.com/dqxiu/CaliNet", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;2", + "aff_unique_norm": "Peking University;Shanghai AI Lab;University of California, Santa Barbara", + "aff_unique_dep": "School of Computer Science;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.shanghaiailab.com;https://www.ucsb.edu", + "aff_unique_abbr": "PKU;SAIL;UCSB", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Santa Barbara", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-industry.14", + "title": "Calibrating Imbalanced Classifiers with Focal Loss: An Empirical Study", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Imbalanced data distribution is a practical and common challenge in building production-level machine learning (ML) models in industry, where data usually exhibits long-tail distributions. For instance, in virtual AI Assistants, such as Google Assistant, Amazon Alexa and Apple Siri, the \u201cplay music\u201d or \u201cset timer\u201d utterance is exposed to an order of magnitude more traffic than other skills. This can easily cause trained models to overfit to the majority classes, categories or intents, lead to model miscalibration. The uncalibrated models output unreliable (mostly overconfident) predictions, which are at high risk of affecting downstream decision-making systems. In this work, we study the calibration of production models in the industry use-case of predicting product return reason codes in customer service conversations of an online retail store; The returns reasons also exhibit class imbalance. 
To alleviate the resulting miscalibration in the production ML model, we streamline the model development and deployment using focal loss (CITATION). We empirically show the effectiveness of model training with focal loss in learning better calibrated models, as compared to standard cross-entropy loss. Better calibration, in turn, enables better control of the precision-recall trade-off for the models deployed in production.", + "author": "Cheng Wang; Jorge Balazs; Gy\u00f6rgy Szarvas; Patrick Ernst; Lahari Poddar; Pavel Danchenko", + "authorids": "/c/cheng-wang/; /j/jorge-balazs/; /g/gyorgy-szarvas/; /p/patrick-ernst/; /l/lahari-poddar/; /p/pavel-danchenko/", + "bibtex": "@inproceedings{wang-etal-2022-calibrating,\n title = \"Calibrating Imbalanced Classifiers with Focal Loss: An Empirical Study\",\n author = {Wang, Cheng and\n Balazs, Jorge and\n Szarvas, Gy{\\\"o}rgy and\n Ernst, Patrick and\n Poddar, Lahari and\n Danchenko, Pavel},\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.14/\",\n doi = \"10.18653/v1/2022.emnlp-industry.14\",\n pages = \"145--153\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.14.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.14/", + "pdf_size": 1098339, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2130419014371701630&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Amazon; Amazon; Amazon; Amazon; Amazon; Amazon", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": 
"0;0;0;0;0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.629", + "title": "Calibrating Student Models for Emotion-related Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge Distillation (KD) is an effective method to transfer knowledge from one network (a.k.a. teacher) to another (a.k.a. student). In this paper, we study KD on the emotion-related tasks from a new perspective: calibration. We further explore the impact of the mixup data augmentation technique on the distillation objective and propose to use a simple yet effective mixup method informed by training dynamics for calibrating the student models. Underpinned by the regularization impact of the mixup process by providing better training signals to the student models using training dynamics, our proposed mixup strategy gradually enhances the student model\u2019s calibration while effectively improving its performance. We evaluate the calibration of pre-trained language models through knowledge distillation over three tasks of emotion detection, sentiment analysis, and empathy detection. 
By conducting extensive experiments on different datasets, with both in-domain and out-of-domain test sets, we demonstrate that student models distilled from teacher models trained using our proposed mixup method obtained the lowest Expected Calibration Errors (ECEs) and best performance on both in-domain and out-of-domain test sets.", + "author": "Mahshid Hosseini; Cornelia Caragea", + "authorids": "/m/mahshid-hosseini/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{hosseini-caragea-2022-calibrating,\n title = \"Calibrating Student Models for Emotion-related Tasks\",\n author = \"Hosseini, Mahshid and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.629/\",\n doi = \"10.18653/v1/2022.emnlp-main.629\",\n pages = \"9266--9278\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.629.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.629/", + "pdf_size": 678534, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7306197458162808412&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Computer Science, University of Illinois at Chicago; Computer Science, University of Illinois at Chicago", + "aff_domain": "uic.edu;uic.edu", + "email": "uic.edu;uic.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Illinois at Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"2022.findings-emnlp.209", + "title": "Calibrating Trust of Multi-Hop Question Answering Systems with Decompositional Probes", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multi-hop Question Answering (QA) is a challenging task since it requires an accurate aggregation of information from multiple context paragraphs and a thorough understanding of the underlying reasoning chains. Recent work in multi-hop QA has shown that performance can be boosted by first decomposing the questions into simpler, single-hop questions. In this paper, we explore one additional utility of the multi-hop decomposition from the perspective of explainable NLP: to create explanation by probing a neural QA model with them. We hypothesize that in doing so, users will be better able to predict when the underlying QA system will give the correct answer. Through human participant studies, we verify that exposing the decomposition probes and answers to the probes to users can increase their ability to predict system performance on a question instance basis. We show that decomposition is an effective form of probing QA systems as well as a promising approach to explanation generation. 
In-depth analyses show the need for improvements in decomposition systems.", + "author": "Kaige Xie; Sarah Wiegreffe; Mark Riedl", + "authorids": "/k/kaige-xie/; /s/sarah-wiegreffe/; /m/mark-riedl/", + "bibtex": "@inproceedings{xie-etal-2022-calibrating,\n title = \"Calibrating Trust of Multi-Hop Question Answering Systems with Decompositional Probes\",\n author = \"Xie, Kaige and\n Wiegreffe, Sarah and\n Riedl, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.209/\",\n doi = \"10.18653/v1/2022.findings-emnlp.209\",\n pages = \"2888--2902\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.209.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.209/", + "pdf_size": 1644696, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4924102699864766877&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Interactive Computing, Georgia Institute of Technology; Allen Institute for Artificial Intelligence; School of Interactive Computing, Georgia Institute of Technology", + "aff_domain": "gatech.edu;gmail.com;gatech.edu", + "email": "gatech.edu;gmail.com;gatech.edu", + "github": "https://github.com/kaigexie/decompositional-probing", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Georgia Institute of Technology;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "School of Interactive Computing;", + "aff_unique_url": "https://www.gatech.edu;https://allenai.org", + "aff_unique_abbr": "Georgia Tech;AI2", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Atlanta;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": 
"United States" + }, + { + "id": "2022.emnlp-main.170", + "title": "Calibrating Zero-shot Cross-lingual (Un-)structured Predictions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We investigate model calibration in the setting of zero-shot cross-lingual transfer with large-scale pre-trained language models. The level of model calibration is an important metric for evaluating the trustworthiness of predictive models. There exists an essential need for model calibration when natural language models are deployed in critical tasks. We study different post-training calibration methods in structured and unstructured prediction tasks. We find that models trained with data from the source language become less calibrated when applied to the target language and that calibration errors increase with intrinsic task difficulty and relative sparsity of training data. Moreover, we observe a potential connection between the level of calibration error and an earlier proposed measure of the distance from English to other languages. 
Finally, our comparison demonstrates that among other methods Temperature Scaling (TS) generalizes well to distant languages, but TS fails to calibrate more complex confidence estimation in structured predictions compared to more expressive alternatives like Gaussian Process Calibration.", + "author": "Zhengping Jiang; Anqi Liu; Benjamin Van Durme", + "authorids": "/z/zheng-ping-jiang/; /a/anqi-liu/; /b/benjamin-van-durme/", + "bibtex": "@inproceedings{jiang-etal-2022-calibrating,\n title = \"Calibrating Zero-shot Cross-lingual (Un-)structured Predictions\",\n author = \"Jiang, Zhengping and\n Liu, Anqi and\n Van Durme, Benjamin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.170/\",\n doi = \"10.18653/v1/2022.emnlp-main.170\",\n pages = \"2648--2674\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.170.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.170/", + "pdf_size": 923197, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9620132644781502494&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Johns Hopkins University; Johns Hopkins University; Johns Hopkins University", + "aff_domain": "jhu.edu;jhu.edu;jhu.edu", + "email": "jhu.edu;jhu.edu;jhu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.178", + "title": "Calibration 
Meets Explanation: A Simple and Effective Approach for Model Confidence Estimates", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Calibration strengthens the trustworthiness of black-box models by producing better accurate confidence estimates on given examples. However, little is known about if model explanations can help confidence calibration. Intuitively, humans look at important features attributions and decide whether the model is trustworthy. Similarly, the explanations may tell us when the model might know and when it does not. Inspired by this, we propose a method named CME that leverages model explanations to make the model less confident with non-inductive attributions. The idea is that when the model is not highly confident, it is difficult to identify strong indications of any class, and the tokens accordingly do not have high attribution scores for any class and vice versa. We conduct extensive experiments on six datasets with two popular pre-trained language models in the in-domain and out-of-domain settings. The results show that CME improves calibration performance in all settings. The expected calibration errors are further reduced when combined with temperature scaling. 
Our findings highlight that model explanations can help calibrate posterior estimates.", + "author": "Dongfang Li; Baotian Hu; Qingcai Chen", + "authorids": "/d/dongfang-li/; /b/baotian-hu/; /q/qingcai-chen/", + "bibtex": "@inproceedings{li-etal-2022-calibration,\n title = \"Calibration Meets Explanation: A Simple and Effective Approach for Model Confidence Estimates\",\n author = \"Li, Dongfang and\n Hu, Baotian and\n Chen, Qingcai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.178/\",\n doi = \"10.18653/v1/2022.emnlp-main.178\",\n pages = \"2775--2784\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.178.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.178/", + "pdf_size": 338020, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4516151945686774731&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Harbin Institute of Technology (Shenzhen), Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology (Shenzhen), Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology (Shenzhen), Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "gmail.com;hit.edu.cn;hit.edu.cn", + "email": "gmail.com;hit.edu.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://en.hhit.edu.cn/;", + "aff_unique_abbr": "HIT;", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0+0;0+0;0+0", + 
"aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.112", + "title": "Can AMR Assist Legal and Logical Reasoning?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Abstract Meaning Representation (AMR) has been shown to be useful for many downstream tasks. In this work, we explore the use of AMR for legal and logical reasoning. Specifically, we investigate if AMR can help capture logical relationships on multiple choice question answering (MCQA) tasks. We propose neural architectures that utilize linearised AMR graphs in combination with pre-trained language models. While these models are not able to outperform text-only baselines, they correctly solve different instances than the text models, suggesting complementary abilities. Error analysis further reveals that AMR parsing quality is the most prominent challenge, especially regarding inputs with multiple sentences. We conduct a theoretical analysis of how logical relations are represented in AMR and conclude it might be helpful in some logical statements but not for others.", + "author": "Nikolaus Schrack; Ruixiang Cui; Hugo L\u00f3pez; Daniel Hershcovich", + "authorids": "/n/nikolaus-schrack/; /r/ruixiang-cui/; /h/hugo-lopez/; /d/daniel-hershcovich/", + "bibtex": "@inproceedings{schrack-etal-2022-amr,\n title = \"Can {AMR} Assist Legal and Logical Reasoning?\",\n author = \"Schrack, Nikolaus and\n Cui, Ruixiang and\n L{\\'o}pez, Hugo and\n Hershcovich, Daniel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.112/\",\n doi = \"10.18653/v1/2022.findings-emnlp.112\",\n pages = \"1555--1568\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.112.pdf", + 
"site": "https://aclanthology.org/2022.findings-emnlp.112/", + "pdf_size": 258141, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1658643617258803407&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, University of Copenhagen; Department of Computer Science, University of Copenhagen; DTU Compute, Technical University of Denmark; Department of Computer Science, University of Copenhagen", + "aff_domain": "di.ku.dk;di.ku.dk;dtu.dk;di.ku.dk", + "email": "di.ku.dk;di.ku.dk;dtu.dk;di.ku.dk", + "github": "https://github.com/nschrack/fusion", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Copenhagen;Technical University of Denmark", + "aff_unique_dep": "Department of Computer Science;DTU Compute", + "aff_unique_url": "https://www.ku.dk;https://www.dtu.dk", + "aff_unique_abbr": "UCPH;DTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Denmark" + }, + { + "id": "2022.findings-emnlp.147", + "title": "Can Language Models Serve as Temporal Knowledge Bases?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent progress regarding the use of language models (LMs) as knowledge bases (KBs) has shown that language models can act as structured knowledge bases for storing relational facts. However, most existing works only considered the LM-as-KB paradigm in a static setting, which ignores the analysis of temporal dynamics of world knowledge. Furthermore, a basic function of KBs, i.e., the ability to store conflicting information (i.e., 1-N, N-1, and N-M relations), is underexplored. 
In this paper, we formulate two practical requirements for treating LMs as temporal KBs: (i) The capacity to store temporally-scoped knowledge that contains conflicting information and (ii) the ability to use stored knowledge for temporally-scoped knowledge queries. We introduce a new dataset called LAMA-TK which is aimed at probing temporally-scoped knowledge, and investigate the two above requirements to explore the LM-as-KB paradigm in the temporal domain. On the one hand, experiments show that LMs can memorize millions of temporally-scoped facts with relatively high accuracy and transfer stored knowledge to temporal knowledge queries, thereby expanding the LM-as-KB paradigm to the temporal domain. On the other hand, we show that memorizing conflicting information, which has been neglected by previous works, is still challenging for LMs and hinders the memorization of other unrelated one-to-one relationships.", + "author": "Ruilin Zhao; Feng Zhao; Guandong Xu; Sixiao Zhang; Hai Jin", + "authorids": "/r/ruilin-zhao/; /f/feng-zhao/; /g/guandong-xu/; /s/sixiao-zhang/; /h/hai-jin/", + "bibtex": "@inproceedings{zhao-etal-2022-language,\n title = \"Can Language Models Serve as Temporal Knowledge Bases?\",\n author = \"Zhao, Ruilin and\n Zhao, Feng and\n Xu, Guandong and\n Zhang, Sixiao and\n Jin, Hai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.147/\",\n doi = \"10.18653/v1/2022.findings-emnlp.147\",\n pages = \"2024--2037\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.147.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.147/", + "pdf_size": 314224, + "gs_citation": 8, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=873932500937204504&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China+Data Science and Machine Intelligence Lab, University of Technology Sydney, Sydney, Australia; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China; Data Science and Machine Intelligence Lab, University of Technology Sydney, Sydney, Australia; Data Science and Machine Intelligence Lab, University of Technology Sydney, Sydney, Australia; National Engineering Research Center for Big Data Technology and System, Services Computing Technology and System Lab, Cluster and Grid Computing Lab, School of Computer Science and Technology, Huazhong University of Science and Technology, China", + "aff_domain": "hust.edu.cn;hust.edu.cn;uts.edu.au;gmail.com;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;uts.edu.au;gmail.com;hust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;1;1;0", + "aff_unique_norm": "Huazhong University of Science and Technology;University of Technology Sydney", + "aff_unique_dep": "School of Computer Science and Technology;Data Science and Machine Intelligence Lab", + "aff_unique_url": "http://www.hust.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": "HUST;UTS", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Sydney", + "aff_country_unique_index": "0+1;0;1;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.768", + "title": "Can Transformers Reason in Fragments of Natural Language?", + "track": "main", + 
"status": "Main", + "award": false, + "abstract": "State-of-the-art deep-learning-based approaches to Natural Language Processing (NLP) are credited with various capabilities that involve reasoning with natural language texts. %However, reasoning in this setting is often ill-defined and shallow. In this paper we carry out a large-scale empirical study investigating the detection of formally valid inferences in controlled fragments of natural language for which the satisfiability problem becomes increasingly complex. We find that, while transformer-based language models perform surprisingly well in these scenarios, a deeper analysis reveals that they appear to overfit to superficial patterns in the data rather than acquiring the logical principles governing the reasoning in these fragments.", + "author": "Viktor Schlegel; Kamen Pavlov; Ian Pratt-Hartmann", + "authorids": "/v/viktor-schlegel/; /k/kamen-pavlov/; /i/ian-pratt-hartmann/", + "bibtex": "@inproceedings{schlegel-etal-2022-transformers,\n title = \"Can Transformers Reason in Fragments of Natural Language?\",\n author = \"Schlegel, Viktor and\n Pavlov, Kamen and\n Pratt-Hartmann, Ian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.768/\",\n doi = \"10.18653/v1/2022.emnlp-main.768\",\n pages = \"11184--11199\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.768.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.768/", + "pdf_size": 317083, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7273829076540516405&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "ASUS Intelligent Cloud Services (AICS), Singapore + Department of 
Computer Science, University of Manchester, United Kingdom; Department of Computer Science, University of Manchester, United Kingdom; Department of Computer Science, University of Manchester, United Kingdom + Instytut Informatyki Uniwersytet Opolski, Poland", + "aff_domain": "asus.com;gmail.com;manchester.ac.uk", + "email": "asus.com;gmail.com;manchester.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;1+2", + "aff_unique_norm": "ASUS Intelligent Cloud Services;University of Manchester;Uniwersytet Opolski", + "aff_unique_dep": "Intelligent Cloud Services;Department of Computer Science;Instytut Informatyki", + "aff_unique_url": ";https://www.manchester.ac.uk;", + "aff_unique_abbr": "AICS;UoM;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1+2", + "aff_country_unique": "Singapore;United Kingdom;Poland" + }, + { + "id": "2022.emnlp-main.127", + "title": "Can Visual Context Improve Automatic Speech Recognition for an Embodied Agent?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The usage of automatic speech recognition (ASR) systems are becoming omnipresent ranging from personal assistant to chatbots, home, and industrial automation systems, etc. Modern robots are also equipped with ASR capabilities for interacting with humans as speech is the most natural interaction modality. However, ASR in robots faces additional challenges as compared to a personal assistant. Being an embodied agent, a robot must recognize the physical entities around it and therefore reliably recognize the speech containing the description of such entities. However, current ASR systems are often unable to do so due to limitations in ASR training, such as generic datasets and open-vocabulary modeling. Also, adverse conditions during inference, such as noise, accented, and far-field speech makes the transcription inaccurate. 
In this work, we present a method to incorporate a robot\u2019s visual information into an ASR system and improve the recognition of a spoken utterance containing a visible entity. Specifically, we propose a new decoder biasing technique to incorporate the visual context while ensuring the ASR output does not degrade for incorrect context. We achieve a 59% relative reduction in WER from an unmodified ASR system.", + "author": "Pradip Pramanick; Chayan Sarkar", + "authorids": "/p/pradip-pramanick/; /c/chayan-sarkar/", + "bibtex": "@inproceedings{pramanick-sarkar-2022-visual,\n title = \"Can Visual Context Improve Automatic Speech Recognition for an Embodied Agent?\",\n author = \"Pramanick, Pradip and\n Sarkar, Chayan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.127/\",\n doi = \"10.18653/v1/2022.emnlp-main.127\",\n pages = \"1946--1957\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.127.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.127/", + "pdf_size": 724725, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2905497875791757660&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "TCS Research, India; TCS Research, India", + "aff_domain": "tcs.com;tcs.com", + "email": "tcs.com;tcs.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Tata Consultancy Services", + "aff_unique_dep": "Research", + "aff_unique_url": "https://www.tcs.com", + "aff_unique_abbr": "TCS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": 
"2022.findings-emnlp.38", + "title": "Can language models learn from explanations in context?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language Models (LMs) can perform new tasks by adapting to a few in-context examples. For humans, explanations that connect examples to task principles can improve learning. We therefore investigate whether explanations of few-shot examples can help LMs. We annotate questions from 40 challenging tasks with answer explanations, and various matched control explanations. We evaluate how different types of explanations, instructions, and controls affect zero- and few-shot performance. We analyze these results using statistical multilevel modeling techniques that account for the nested dependencies among conditions, tasks, prompts, and models. We find that explanations can improve performance\u2014even without tuning. Furthermore, explanations hand-tuned for performance on a small validation set offer substantially larger benefits, and building a prompt by selecting examples and explanations together substantially improves performance over selecting examples alone. Finally, even untuned explanations outperform carefully matched controls, suggesting that the benefits are due to the link between an example and its explanation, rather than lower-level features. However, only large models benefit. 
In summary, explanations can support the in-context learning of large LMs on challenging tasks.", + "author": "Andrew Lampinen; Ishita Dasgupta; Stephanie Chan; Kory Mathewson; Mh Tessler; Antonia Creswell; James McClelland; Jane Wang; Felix Hill", + "authorids": "/a/andrew-lampinen/; /i/ishita-dasgupta/; /s/stephanie-chan/; /k/kory-mathewson/; /m/mh-tessler/; /a/antonia-creswell/; /j/james-mcclelland/; /j/jane-wang/; /f/felix-hill/", + "bibtex": "@inproceedings{lampinen-etal-2022-language,\n title = \"Can language models learn from explanations in context?\",\n author = \"Lampinen, Andrew and\n Dasgupta, Ishita and\n Chan, Stephanie and\n Mathewson, Kory and\n Tessler, Mh and\n Creswell, Antonia and\n McClelland, James and\n Wang, Jane and\n Hill, Felix\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.38/\",\n doi = \"10.18653/v1/2022.findings-emnlp.38\",\n pages = \"537--563\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.38.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.38/", + "pdf_size": 394012, + "gs_citation": 301, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10862635776889386919&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "DeepMind", + "aff_unique_dep": "", + "aff_unique_url": "https://deepmind.com", + "aff_unique_abbr": "DeepMind", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.318", + "title": "Candidate Soups: Fusing Candidate Results Improves Translation Quality for Non-Autoregressive Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Non-autoregressive translation (NAT) model achieves a much faster inference speed than the autoregressive translation (AT) model because it can simultaneously predict all tokens during inference. However, its translation quality suffers from degradation compared to AT. And existing NAT methods only focus on improving the NAT model\u2019s performance but do not fully utilize it. In this paper, we propose a simple but effective method called \u201cCandidate Soups,\u201d which can obtain high-quality translations while maintaining the inference speed of NAT models. Unlike previous approaches that pick the individual result and discard the remainders, Candidate Soups (CDS) can fully use the valuable information in the different candidate translations through model uncertainty. Extensive experiments on two benchmarks (WMT\u201914 EN\u2013DE and WMT\u201916 EN\u2013RO) demonstrate the effectiveness and generality of our proposed method, which can significantly improve the translation quality of various base models. 
More notably, our best variant outperforms the AT model on three translation tasks with 7.6\u00d7 speedup.", + "author": "Huanran Zheng; Wei Zhu; Pengfei Wang; Xiaoling Wang", + "authorids": "/h/huanran-zheng/; /w/wei-zhu/; /p/pengfei-wang/; /x/xiaoling-wang/", + "bibtex": "@inproceedings{zheng-etal-2022-candidate,\n title = \"Candidate Soups: Fusing Candidate Results Improves Translation Quality for Non-Autoregressive Translation\",\n author = \"Zheng, Huanran and\n Zhu, Wei and\n Wang, Pengfei and\n Wang, Xiaoling\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.318/\",\n doi = \"10.18653/v1/2022.emnlp-main.318\",\n pages = \"4811--4823\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.318.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.318/", + "pdf_size": 460827, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2423641184163255014&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "East China Normal University; East China Normal University; East China Normal University; East China Normal University", + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "https://github.com/boom-R123/Candidate_Soups", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + 
"id": "2022.emnlp-main.226", + "title": "CapOnImage: Context-driven Dense-Captioning on Image", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing image captioning systems are dedicated to generating narrative captions for images, which are spatially detached from theimage in presentation. However, texts can also be used as decorations on the image to highlight the key points and increase theattractiveness of images. In this work, we introduce a new taskcalled captioning on image (CapOnImage), which aims to generatedense captions at different locations of the image based on contextual information. To fully exploit the surrounding visual context togenerate the most suitable caption for each location, we propose amulti-modal pre-training model with multi-level pre-training tasksthat progressively learn the correspondence between texts and image locations from easy to difficult. Since the model may generateredundant captions for nearby locations, we further enhance thelocation embedding with neighbor locations as context. For thisnew task, we also introduce a large-scale benchmark called CapOnImage2M, which contains 2.1 million product images, each with anaverage of 4.8 spatially localized captions. 
Compared with other image captioning model variants, our model achieves the best resultsin both captioning accuracy and diversity aspects.", + "author": "Yiqi Gao; Xinglin Hou; Yuanmeng Zhang; Tiezheng Ge; Yuning Jiang; Peng Wang", + "authorids": "/y/yiqi-gao/; /x/xinglin-hou/; /y/yuanmeng-zhang/; /t/tiezheng-ge/; /y/yuning-jiang/; /p/peng-wang/", + "bibtex": "@inproceedings{gao-etal-2022-caponimage,\n title = \"{C}ap{O}n{I}mage: Context-driven Dense-Captioning on Image\",\n author = \"Gao, Yiqi and\n Hou, Xinglin and\n Zhang, Yuanmeng and\n Ge, Tiezheng and\n Jiang, Yuning and\n Wang, Peng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.226/\",\n doi = \"10.18653/v1/2022.emnlp-main.226\",\n pages = \"3449--3465\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.226.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.226/", + "pdf_size": 9708413, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2250694840938953511&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science, Northwestern Polytechnical University; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; School of Computer Science, Northwestern Polytechnical University", + "aff_domain": "mail.nwpu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;nwpu.edu.cn", + "email": "mail.nwpu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;nwpu.edu.cn", + "github": "https://github.com/YqGao716/CapOnImage", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0", + "aff_unique_norm": "Northwestern Polytechnical University;Alibaba Group", + 
"aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.nwpu.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "NWPU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.336", + "title": "Capturing Global Structural Information in Long Document Question Answering with Compressive Graph Selector Network", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Long document question answering is a challenging task due to its demands for complex reasoning over long text. Previous works usually take long documents as non-structured flat texts or only consider the local structure in long documents. However, these methods usually ignore the global structure of the long document, which is essential for long-range understanding. To tackle this problem, we propose Compressive Graph Selector Network (CGSN) to capture the global structure in a compressive and iterative manner. The proposed model mainly focuses on the evidence selection phase of long document question answering. Specifically, it consists of three modules: local graph network, global graph network and evidence memory network. Firstly, the local graph network builds the graph structure of the chunked segment in token, sentence, paragraph and segment levels to capture the short-term dependency of the text. Secondly, the global graph network selectively receives the information of each level from the local graph, compresses them into the global graph nodes and applies graph attention to the global graph nodes to build the long-range reasoning over the entire text in an iterative way. Thirdly, the evidence memory network is designed to alleviate the redundancy problem in the evidence selection by saving the selected result in the previous steps. 
Extensive experiments show that the proposed model outperforms previous methods on two datasets.", + "author": "Yuxiang Nie; Heyan Huang; Wei Wei; Xian-Ling Mao", + "authorids": "/y/yuxiang-nie/; /h/he-yan-huang/; /w/wei-wei/; /x/xian-ling-mao/", + "bibtex": "@inproceedings{nie-etal-2022-capturing,\n title = \"Capturing Global Structural Information in Long Document Question Answering with Compressive Graph Selector Network\",\n author = \"Nie, Yuxiang and\n Huang, Heyan and\n Wei, Wei and\n Mao, Xian-Ling\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.336/\",\n doi = \"10.18653/v1/2022.emnlp-main.336\",\n pages = \"5036--5047\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.336.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.336/", + "pdf_size": 450042, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2682894555129133320&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "School of Computer Science and Technology, Beijing Institute of Technology+Beijing Engineering Research Center of High Volume Language Information Processing and Cloud Computing Applications+Beijing Institute of Technology Southeast Academy of Information Technology; School of Computer Science and Technology, Beijing Institute of Technology+Beijing Engineering Research Center of High Volume Language Information Processing and Cloud Computing Applications+Beijing Institute of Technology Southeast Academy of Information Technology; Huazhong University of Science and Technology; School of Computer Science and Technology, Beijing Institute of Technology+Beijing Engineering Research Center of High Volume 
Language Information Processing and Cloud Computing Applications+Beijing Institute of Technology Southeast Academy of Information Technology", + "aff_domain": "bit.edu.cn;bit.edu.cn;hust.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;hust.edu.cn;bit.edu.cn", + "github": "https://github.com/JerrryNie/CGSN", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;0+1+0;2;0+1+0", + "aff_unique_norm": "Beijing Institute of Technology;Beijing Engineering Research Center;Huazhong University of Science and Technology", + "aff_unique_dep": "School of Computer Science and Technology;High Volume Language Information Processing and Cloud Computing Applications;", + "aff_unique_url": "http://www.bit.edu.cn/;;http://www.hust.edu.cn", + "aff_unique_abbr": "BIT;;HUST", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Southeast", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.507", + "title": "Capturing Topic Framing via Masked Language Modeling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Differential framing of issues can lead to divergent world views on important issues. This is especially true in domains where the information presented can reach a large audience, such as traditional and social media. Scalable and reliable measurement of such differential framing is an important first step in addressing them. In this work, based on the intuition that framing affects the tone and word choices in written language, we propose a framework for modeling the differential framing of issues through masked token prediction via large-scale fine-tuned language models (LMs). Specifically, we explore three key factors for our framework: 1) prompt generation methods for the masked token prediction; 2) methods for normalizing the output of fine-tuned LMs; 3) robustness to the choice of pre-trained LMs used for fine-tuning. 
Through experiments on a dataset of articles from traditional media outlets covering five diverse and politically polarized topics, we show that our framework can capture differential framing of these topics with high reliability.", + "author": "Xiaobo Guo; Weicheng Ma; Soroush Vosoughi", + "authorids": "/x/xiaobo-guo/; /w/weicheng-ma/; /s/soroush-vosoughi/", + "bibtex": "@inproceedings{guo-etal-2022-capturing,\n title = \"Capturing Topic Framing via Masked Language Modeling\",\n author = \"Guo, Xiaobo and\n Ma, Weicheng and\n Vosoughi, Soroush\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.507/\",\n doi = \"10.18653/v1/2022.findings-emnlp.507\",\n pages = \"6811--6825\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.507.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.507/", + "pdf_size": 895224, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15435671533595521078&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.394", + "title": "Cards Against AI: Predicting Humor in a Fill-in-the-blank Party Game", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Humor is an inherently social phenomenon, with humorous utterances shaped by what is socially and culturally accepted. Understanding humor is an important NLP challenge, with many applications to human-computer interactions. 
In this work we explore humor in the context of Cards Against Humanity \u2013 a party game where players complete fill-in-the-blank statements using cards that can be offensive or politically incorrect.We introduce a novel dataset of 300,000 online games of Cards Against Humanity, including 785K unique jokes, analyze it and provide insights. We trained machine learning models to predict the winning joke per game, achieving performance twice as good (20%) as random, even without any user information.On the more difficult task of judging novel cards, we see the models\u2019 ability to generalize is moderate. Interestingly, we find that our models are primarily focused on punchline card, with the context having little impact.Analyzing feature importance, we observe that short, crude, juvenile punchlines tend to win.", + "author": "Dan Ofer; Dafna Shahaf", + "authorids": "/d/dan-ofer/; /d/dafna-shahaf/", + "bibtex": "@inproceedings{ofer-shahaf-2022-cards,\n title = \"Cards Against {AI}: Predicting Humor in a Fill-in-the-blank Party Game\",\n author = \"Ofer, Dan and\n Shahaf, Dafna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.394/\",\n doi = \"10.18653/v1/2022.findings-emnlp.394\",\n pages = \"5397--5403\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.394.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.394/", + "pdf_size": 315288, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12726791093957064649&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The Hebrew University of Jerusalem; The Hebrew University of Jerusalem", + "aff_domain": "mail.huji.ac.il;cs.huji.ac.il", + "email": 
"mail.huji.ac.il;cs.huji.ac.il", + "github": "https://github.com/ddofer/CAH5397", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Hebrew University of Jerusalem", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huji.ac.il", + "aff_unique_abbr": "HUJI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.438", + "title": "Cascading Biases: Investigating the Effect of Heuristic Annotation Strategies on Data and Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Cognitive psychologists have documented that humans use cognitive heuristics, or mental shortcuts, to make quick decisions while expending less effort. While performing annotation work on crowdsourcing platforms, we hypothesize that such heuristic use among annotators cascades on to data quality and model robustness. In this work, we study cognitive heuristic use in the context of annotating multiple-choice reading comprehension datasets. We propose tracking annotator heuristic traces, where we tangibly measure low-effort annotation strategies that could indicate usage of various cognitive heuristics. We find evidence that annotators might be using multiple such heuristics, based on correlations with a battery of psychological tests. Importantly, heuristic use among annotators determines data quality along several dimensions: (1) known biased models, such as partial input models, more easily solve examples authoredby annotators that rate highly on heuristic use, (2) models trained on annotators scoring highly on heuristic use don\u2019t generalize as well, and (3) heuristic-seeking annotators tend to create qualitatively less challenging examples. 
Our findings suggest that tracking heuristic usage among annotators can potentially help with collecting challenging datasets and diagnosing model biases.", + "author": "Chaitanya Malaviya; Sudeep Bhatia; Mark Yatskar", + "authorids": "/c/chaitanya-malaviya/; /s/sudeep-bhatia/; /m/mark-yatskar/", + "bibtex": "@inproceedings{malaviya-etal-2022-cascading,\n title = \"Cascading Biases: Investigating the Effect of Heuristic Annotation Strategies on Data and Models\",\n author = \"Malaviya, Chaitanya and\n Bhatia, Sudeep and\n Yatskar, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.438/\",\n doi = \"10.18653/v1/2022.emnlp-main.438\",\n pages = \"6525--6540\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.438.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.438/", + "pdf_size": 1311992, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15398321886262432896&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", + "aff_domain": "upenn.edu;upenn.edu;upenn.edu", + "email": "upenn.edu;upenn.edu;upenn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.23", + "title": "Certified Error Control of Candidate Set Pruning for Two-Stage Relevance 
Ranking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In information retrieval (IR), candidate set pruning has been commonly used to speed up two-stage relevance ranking. However, such an approach lacks accurate error control and often trades accuracy against computational efficiency in an empirical fashion, missing theoretical guarantees. In this paper, we propose the concept of certified error control of candidate set pruning for relevance ranking, which means that the test error after pruning is guaranteed to be controlled under a user-specified threshold with high probability. Both in-domain and out-of-domain experiments show that our method successfully prunes the first-stage retrieved candidate sets to improve the second-stage reranking speed while satisfying the pre-specified accuracy constraints in both settings. For example, on MS MARCO Passage v1, our method reduces the average candidate set size from 1000 to 27, increasing reranking speed by about 37 times, while keeping MRR@10 greater than a pre-specified value of 0.38 with about 90% empirical coverage. In contrast, empirical baselines fail to meet such requirements. 
Code and data are available at: https://github.com/alexlimh/CEC-Ranking.", + "author": "Minghan Li; Xinyu Zhang; Ji Xin; Hongyang Zhang; Jimmy Lin", + "authorids": "/m/minghan-li/; /x/xinyu-zhang/; /j/ji-xin/; /h/hongyang-zhang/; /j/jimmy-lin/", + "bibtex": "@inproceedings{li-etal-2022-certified,\n title = \"Certified Error Control of Candidate Set Pruning for Two-Stage Relevance Ranking\",\n author = \"Li, Minghan and\n Zhang, Xinyu and\n Xin, Ji and\n Zhang, Hongyang and\n Lin, Jimmy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.23/\",\n doi = \"10.18653/v1/2022.emnlp-main.23\",\n pages = \"333--345\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.23.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.23/", + "pdf_size": 631368, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15828114060992473386&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo", + "aff_domain": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "github": "https://github.com/alexlimh/CEC-Ranking", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Waterloo", + "aff_unique_dep": "David R. 
Cheriton School of Computer Science", + "aff_unique_url": "https://uwaterloo.ca", + "aff_unique_abbr": "UWaterloo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.findings-emnlp.187", + "title": "Chaining Simultaneous Thoughts for Numerical Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Given that rich information is hidden behind ubiquitous numbers in text, numerical reasoning over text should be an essential skill of AI systems. To derive precise equations to solve numerical reasoning problems, previous work focused on modeling the structures of equations, and has proposed various structured decoders. Though structure modeling proves to be effective, these structured decoders construct a single equation in a pre-defined autoregressive order, potentially placing an unnecessary restriction on how a model should grasp the reasoning process. Intuitively, humans may have numerous pieces of thoughts popping up in no pre-defined order; thoughts are not limited to the problem at hand, and can even be concerned with other related problems. By comparing diverse thoughts and chaining relevant pieces, humans are less prone to errors. In this paper, we take this inspiration and propose CANTOR, a numerical reasoner that models reasoning steps using a directed acyclic graph where we produce diverse reasoning steps simultaneously without pre-defined decoding dependencies, and compare and chain relevant ones to reach a solution. 
Extensive experiments demonstrated the effectiveness of CANTOR under both fully-supervised and weakly-supervised settings.", + "author": "Zhihong Shao; Fei Huang; Minlie Huang", + "authorids": "/z/zhihong-shao/; /f/fei-huang/; /m/minlie-huang/", + "bibtex": "@inproceedings{shao-etal-2022-chaining,\n title = \"Chaining Simultaneous Thoughts for Numerical Reasoning\",\n author = \"Shao, Zhihong and\n Huang, Fei and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.187/\",\n doi = \"10.18653/v1/2022.findings-emnlp.187\",\n pages = \"2533--2547\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.187.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.187/", + "pdf_size": 973093, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11811463653917611083&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.382", + "title": "Challenges and Opportunities in Information Manipulation Detection: An Examination of Wartime Russian Media", + "track": "main", + "status": "finding", + "award": false, + "abstract": "NLP research on public opinion manipulation campaigns has primarily focused on detecting overt strategies such as fake news and disinformation. However, information manipulation in the ongoing Russia-Ukraine war exemplifies how governments and media also employ more nuanced strategies. 
We release a new dataset, VoynaSlov, containing 38M+ posts from Russian media outlets on Twitter and VKontakte, as well as public activity and responses, immediately preceding and during the 2022 Russia-Ukraine war. We apply standard and recently-developed NLP models on VoynaSlov to examine agenda setting, framing, and priming, several strategies underlying information manipulation, and reveal variation across media outlet control, social media platform, and time. Our examination of these media effects and extensive discussion of current approaches\u2019 limitations encourage further development of NLP models for understanding information manipulation in emerging crises, as well as other real-world and interdisciplinary tasks.", + "author": "Chan Young Park; Julia Mendelsohn; Anjalie Field; Yulia Tsvetkov", + "authorids": "/c/chan-young-park/; /j/julia-mendelsohn/; /a/anjalie-field/; /y/yulia-tsvetkov/", + "bibtex": "@inproceedings{park-etal-2022-challenges,\n title = \"Challenges and Opportunities in Information Manipulation Detection: An Examination of Wartime {R}ussian Media\",\n author = \"Park, Chan Young and\n Mendelsohn, Julia and\n Field, Anjalie and\n Tsvetkov, Yulia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.382/\",\n doi = \"10.18653/v1/2022.findings-emnlp.382\",\n pages = \"5209--5235\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.382.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.382/", + "pdf_size": 1002735, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7748208648934146283&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Carnegie Mellon University; 
University of Michigan; Stanford University; University of Washington", + "aff_domain": "cs.cmu.edu;umich.edu;stanford.edu;cs.washington.edu", + "email": "cs.cmu.edu;umich.edu;stanford.edu;cs.washington.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Carnegie Mellon University;University of Michigan;Stanford University;University of Washington", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cmu.edu;https://www.umich.edu;https://www.stanford.edu;https://www.washington.edu", + "aff_unique_abbr": "CMU;UM;Stanford;UW", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.253", + "title": "Chapter Ordering in Novels", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Understanding narrative flow and text coherence in long-form documents (novels) remains an open problem in NLP.To gain insight, we explore the task of chapter ordering, reconstructing the original order of chapters in novel given a random permutation of the text. This can be seen as extending the well-known sentence ordering task to vastly larger documents: our task deals with over 9,000 novels with an average of twenty chapters each, versus standard sentence ordering datasets averaging only 5-8 sentences. We formulate the task of reconstructing order as a constraint solving problem, using minimum feedback arc set and traveling salesman problem optimization criteria, where the weights of the graph are generated based on models for character occurrences and chapter boundary detection, using relational chapter scores derived from RoBERTa. 
Our best methods yield a Spearman correlation of 0.59 on this novel and challenging task, substantially above baseline.", + "author": "Allen Kim; Steve Skiena", + "authorids": "/a/allen-kim/; /s/steven-skiena/", + "bibtex": "@inproceedings{kim-skiena-2022-chapter,\n title = \"Chapter Ordering in Novels\",\n author = \"Kim, Allen and\n Skiena, Steve\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.253/\",\n doi = \"10.18653/v1/2022.emnlp-main.253\",\n pages = \"3838--3848\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.253.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.253/", + "pdf_size": 363205, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15071608782151658554&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science, Stony Brook University, NY, USA; Department of Computer Science, Stony Brook University, NY, USA", + "aff_domain": "cs.stonybrook.edu;cs.stonybrook.edu", + "email": "cs.stonybrook.edu;cs.stonybrook.edu", + "github": "https://github.com/allenkim/chapter-ordering-in-novels3838", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Stony Brook University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.stonybrook.edu", + "aff_unique_abbr": "SBU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stony Brook", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.565", + "title": "Character-centric Story Visualization via Visual Planning and Token Alignment", + "track": "main", + "status": 
"Main", + "award": false, + "abstract": "Story visualization advances the traditional text-to-image generation by enabling multiple image generation based on a complete story. This task requires machines to 1) understand long text inputs, and 2) produce a globally consistent image sequence that illustrates the contents of the story. A key challenge of consistent story visualization is to preserve characters that are essential in stories. To tackle the challenge, we propose to adapt a recent work that augments VQ-VAE with a text-to-visual-token (transformer) architecture. Specifically, we modify the text-to-visual-token module with a two-stage framework: 1) character token planning model that predicts the visual tokens for characters only; 2) visual token completion model that generates the remaining visual token sequence, which is sent to VQ-VAE for finalizing image generations. To encourage characters to appear in the images, we further train the two-stage framework with a character-token alignment objective. 
Extensive experiments and evaluations demonstrate that the proposed method excels at preserving characters and can produce higher quality image sequences compared with the strong baselines.", + "author": "Hong Chen; Rujun Han; Te-Lin Wu; Hideki Nakayama; Nanyun Peng", + "authorids": "/h/hong-chen/; /r/rujun-han/; /t/te-lin-wu/; /h/hideki-nakayama/; /n/nanyun-peng/", + "bibtex": "@inproceedings{chen-etal-2022-character,\n title = \"Character-centric Story Visualization via Visual Planning and Token Alignment\",\n author = \"Chen, Hong and\n Han, Rujun and\n Wu, Te-Lin and\n Nakayama, Hideki and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.565/\",\n doi = \"10.18653/v1/2022.emnlp-main.565\",\n pages = \"8259--8272\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.565.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.565/", + "pdf_size": 9387896, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1571810679664088917&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "The University of Tokyo1; AWS AI Labs3; University of California, Los Angeles2; The University of Tokyo1; University of California, Los Angeles2", + "aff_domain": "nlab.ci.i.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp;amazon.com;cs.ucla.edu;cs.ucla.edu", + "email": "nlab.ci.i.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp;amazon.com;cs.ucla.edu;cs.ucla.edu", + "github": "https://github.com/PlusLabNLP/VP-CSV", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;2", + "aff_unique_norm": "The University of Tokyo;Amazon Web Services;University of California, Los Angeles", + "aff_unique_dep": ";AWS AI 
Labs;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://aws.amazon.com;https://www.ucla.edu", + "aff_unique_abbr": "UTokyo;AWS;UCLA", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;1;1;0;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-main.522", + "title": "Character-level White-Box Adversarial Attacks against Transformers via Attachable Subwords Substitution", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose the first character-level white-box adversarial attack method against transformer models. The intuition of our method comes from the observation that words are split into subtokens before being fed into the transformer models and the substitution between two close subtokens has a similar effect with the character modification. Our method mainly contains three steps. First, a gradient-based method is adopted to find the most vulnerable words in the sentence. Then we split the selected words into subtokens to replace the origin tokenization result from the transformer tokenizer. Finally, we utilize an adversarial loss to guide the substitution of attachable subtokens in which the Gumbel-softmax trick is introduced to ensure gradient propagation.Meanwhile, we introduce the visual and length constraint in the optimization process to achieve minimum character modifications.Extensive experiments on both sentence-level and token-level tasks demonstrate that our method could outperform the previous attack methods in terms of success rate and edit distance. 
Furthermore, human evaluation verifies our adversarial examples could preserve their origin labels.", + "author": "Aiwei Liu; Honghai Yu; Xuming Hu; Shu\u2019ang Li; Li Lin; Fukun Ma; Yawen Yang; Lijie Wen", + "authorids": "/a/aiwei-liu/; /h/honghai-yu/; /x/xuming-hu/; /s/shuang-li/; /l/li-lin/; /f/fukun-ma/; /y/yawen-yang/; /l/lijie-wen/", + "bibtex": "@inproceedings{liu-etal-2022-character,\n title = \"Character-level White-Box Adversarial Attacks against Transformers via Attachable Subwords Substitution\",\n author = \"Liu, Aiwei and\n Yu, Honghai and\n Hu, Xuming and\n Li, Shu{'}ang and\n Lin, Li and\n Ma, Fukun and\n Yang, Yawen and\n Wen, Lijie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.522/\",\n doi = \"10.18653/v1/2022.emnlp-main.522\",\n pages = \"7664--7676\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.522.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.522/", + "pdf_size": 690345, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12945161765816345974&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University; Tsinghua University", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + 
"project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.266", + "title": "Cheater\u2019s Bowl: Human vs. Computer Search Strategies for Open-Domain QA", + "track": "main", + "status": "finding", + "award": false, + "abstract": "For humans and computers, the first step in answering an open-domain question is retrieving a set of relevant documents from a large corpus. However, the strategies that computers use fundamentally differ from those of humans. To better understand these differences, we design a gamified interface for data collection\u2014Cheater\u2019s Bowl\u2014where a human answers complex questions with access to both traditional and modern search tools. We collect a dataset of human search sessions, analyze human search strategies, and compare them to state-of-the-art multi-hop QA models. Humans query logically, apply dynamic search chains, and use world knowledge to boost searching. We demonstrate how human queries can improve the accuracy of existing systems and propose improving the future design of QA models.", + "author": "Wanrong He; Andrew Mao; Jordan Boyd-Graber", + "authorids": "/w/wanrong-he/; /a/andrew-mao/; /j/jordan-boyd-graber/", + "bibtex": "@inproceedings{he-etal-2022-cheaters,\n title = \"Cheater`s Bowl: Human vs. 
Computer Search Strategies for Open-Domain {QA}\",\n author = \"He, Wanrong and\n Mao, Andrew and\n Boyd-Graber, Jordan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.266/\",\n doi = \"10.18653/v1/2022.findings-emnlp.266\",\n pages = \"3627--3639\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.266.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.266/", + "pdf_size": 1104312, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14623983792272323911&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Tsinghua University\u2217; University of Maryland; University of Maryland", + "aff_domain": "gmail.com;terpmail.umd.edu;umiacs.umd.edu", + "email": "gmail.com;terpmail.umd.edu;umiacs.umd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Tsinghua University;University of Maryland", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www/umd.edu", + "aff_unique_abbr": "THU;UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.210", + "title": "CheckHARD: Checking Hard Labels for Adversarial Text Detection, Prediction Correction, and Perturbed Word Suggestion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "An adversarial attack generates harmful text that fools a target model. More dangerously, this text is unrecognizable by humans. 
Existing work detects adversarial text and corrects a target\u2019s prediction by identifying perturbed words and changing them into their synonyms, but many benign words are also changed. In this paper, we directly detect adversarial text, correct the prediction, and suggest perturbed words by checking the change in the hard labels from the target\u2019s predictions after replacing a word with its transformation using a model that we call CheckHARD. The experiments demonstrate that CheckHARD outperforms existing work on various attacks, models, and datasets.", + "author": "Hoang-Quoc Nguyen-Son; Huy Quang Ung; Seira Hidano; Kazuhide Fukushima; Shinsaku Kiyomoto", + "authorids": "/h/hoang-quoc-nguyen-son/; /h/huy-quang-ung/; /s/seira-hidano/; /k/kazuhide-fukushima/; /s/shinsaku-kiyomoto/", + "bibtex": "@inproceedings{nguyen-son-etal-2022-checkhard,\n title = \"{C}heck{HARD}: Checking Hard Labels for Adversarial Text Detection, Prediction Correction, and Perturbed Word Suggestion\",\n author = \"Nguyen-Son, Hoang-Quoc and\n Ung, Huy Quang and\n Hidano, Seira and\n Fukushima, Kazuhide and\n Kiyomoto, Shinsaku\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.210/\",\n doi = \"10.18653/v1/2022.findings-emnlp.210\",\n pages = \"2903--2913\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.210.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.210/", + "pdf_size": 713484, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16585005460910855823&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "KDDI Research, Inc., Japan; KDDI Research, Inc., Japan; KDDI Research, Inc., Japan; KDDI Research, 
Inc., Japan; KDDI Research, Inc., Japan", + "aff_domain": "kddi.com;kddi.com;kddi.com;kddi.com;kddi.com", + "email": "kddi.com;kddi.com;kddi.com;kddi.com;kddi.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "KDDI Research, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kddi-research.com", + "aff_unique_abbr": "KDDI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.284", + "title": "Chunk-based Nearest Neighbor Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Semi-parametric models, which augment generation with retrieval, have led to impressive results in language modeling and machine translation, due to their ability to retrieve fine-grained information from a datastore of examples. One of the most prominent approaches, kNN-MT, exhibits strong domain adaptation capabilities by retrieving tokens from domain-specific datastores (Khandelwal et al., 2021). However, kNN-MT requires an expensive retrieval operation for every single generated token, leading to a very low decoding speed (around 8 times slower than a parametric model). In this paper, we introduce a chunk-based kNN-MT model which retrieves chunks of tokens from the datastore, instead of a single token. We propose several strategies for incorporating the retrieved chunks into the generation process, and for selecting the steps at which the model needs to search for neighbors in the datastore. Experiments on machine translation in two settings, static and \u201con-the-fly\u201d domain adaptation, show that the chunk-based kNN-MT model leads to significant speed-ups (up to 4 times) with only a small drop in translation quality.", + "author": "Pedro Henrique Martins; Zita Marinho; Andr\u00e9 F. T. 
Martins", + "authorids": "/p/pedro-henrique-martins/; /z/zita-marinho/; /a/andre-f-t-martins/", + "bibtex": "@inproceedings{martins-etal-2022-chunk,\n title = \"Chunk-based Nearest Neighbor Machine Translation\",\n author = \"Martins, Pedro Henrique and\n Marinho, Zita and\n Martins, Andr{\\'e} F. T.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.284/\",\n doi = \"10.18653/v1/2022.emnlp-main.284\",\n pages = \"4228--4245\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.284.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.284/", + "pdf_size": 788598, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9644011078591538956&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Instituto de Telecomunica\u00e7\u00f5es/Unbabel/Institute of Systems and Robotics/DeepMind/LUMLIS (Lisbon ELLIS Unit), Instituto Superior T\u00e9cnico; Google; Instituto de Telecomunica\u00e7\u00f5es/Unbabel/Institute of Systems and Robotics/DeepMind/LUMLIS (Lisbon ELLIS Unit), Instituto Superior T\u00e9cnico", + "aff_domain": "tecnico.ulisboa.pt;google.com;tecnico.ulisboa.pt", + "email": "tecnico.ulisboa.pt;google.com;tecnico.ulisboa.pt", + "github": "https://github.com/deep-spin/chunk-based_knn-mt", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Instituto Superior T\u00e9cnico;Google", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ist.utl.pt;https://www.google.com", + "aff_unique_abbr": "IST;Google", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Portugal;United 
States" + }, + { + "id": "2022.emnlp-main.750", + "title": "CiteSum: Citation Text-guided Scientific Extreme Summarization and Domain Adaptation with Limited Supervision", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Scientific extreme summarization (TLDR) aims to form ultra-short summaries of scientific papers. Previous efforts on curating scientific TLDR datasets failed to scale up due to the heavy human annotation and domain expertise required. In this paper, we propose a simple yet effective approach to automatically extracting TLDR summaries for scientific papers from their citation texts. Based on the proposed approach, we create a new benchmark CiteSum without human annotation, which is around 30 times larger than the previous human-curated dataset SciTLDR. We conduct a comprehensive analysis of CiteSum, examining its data characteristics and establishing strong baselines. We further demonstrate the usefulness of CiteSum by adapting models pre-trained on CiteSum (named CITES) to new tasks and domains with limited supervision. For scientific extreme summarization, CITES outperforms most fully-supervised methods on SciTLDR without any fine-tuning and obtains state-of-the-art results with only 128 examples. For news extreme summarization, CITES achieves significant gains on XSum over its base model (not pre-trained on CiteSum), e.g., +7.2 ROUGE-1 zero-shot performance and state-of-the-art few-shot performance. 
For news headline generation, CITES performs the best among unsupervised and zero-shot methods on Gigaword.", + "author": "Yuning Mao; Ming Zhong; Jiawei Han", + "authorids": "/y/yuning-mao/; /m/ming-zhong/; /j/jiawei-han/", + "bibtex": "@inproceedings{mao-etal-2022-citesum,\n title = \"{C}ite{S}um: Citation Text-guided Scientific Extreme Summarization and Domain Adaptation with Limited Supervision\",\n author = \"Mao, Yuning and\n Zhong, Ming and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.750/\",\n doi = \"10.18653/v1/2022.emnlp-main.750\",\n pages = \"10922--10935\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.750.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.750/", + "pdf_size": 210007, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4264026272193980476&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 4, + "aff": "University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu", + "github": "https://github.com/morningmoni/CiteSum", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.526", + "title": "ClidSum: A Benchmark 
Dataset for Cross-Lingual Dialogue Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present ClidSum, a benchmark dataset towards building cross-lingual summarization systems on dialogue documents. It consists of 67k+ dialogue documents and 112k+ annotated summaries in different target languages. Based on the proposed ClidSum, we introduce two benchmark settings for supervised and semi-supervised scenarios, respectively. We then build various baseline systems in different paradigms (pipeline and end-to-end) and conduct extensive experiments on ClidSum to provide deeper analyses. Furthermore, we propose mDialBART which extends mBART via further pre-training, where the multiple objectives help the pre-trained model capture the structural characteristics as well as key content in dialogues and the transformation from source to the target language. Experimental results show the superiority of mDialBART, as an end-to-end model, outperforms strong pipeline models on ClidSum. Finally, we discuss specific challenges that current approaches faced with this task and give multiple promising directions for future research. 
We have released the dataset and code at https://github.com/krystalan/ClidSum.", + "author": "Jiaan Wang; Fandong Meng; Ziyao Lu; Duo Zheng; Zhixu Li; Jianfeng Qu; Jie Zhou", + "authorids": "/j/jiaan-wang/; /f/fandong-meng/; /z/ziyao-lu/; /d/duo-zheng/; /z/zhixu-li/; /j/jianfeng-qu/; /j/jie-zhou/", + "bibtex": "@inproceedings{wang-etal-2022-clidsum,\n title = \"{C}lid{S}um: A Benchmark Dataset for Cross-Lingual Dialogue Summarization\",\n author = \"Wang, Jiaan and\n Meng, Fandong and\n Lu, Ziyao and\n Zheng, Duo and\n Li, Zhixu and\n Qu, Jianfeng and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.526/\",\n doi = \"10.18653/v1/2022.emnlp-main.526\",\n pages = \"7716--7729\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.526.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.526/", + "pdf_size": 701241, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4368146056657547637&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Pattern Recognition Center, WeChat AI, Tencent Inc, China+School of Computer Science and Technology, Soochow University, Suzhou, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Beijing University of Posts and Telecommunications, Beijing, China; Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China+Pattern Recognition Center, WeChat AI, Tencent Inc, China; School of Computer Science and Technology, Soochow University, Suzhou, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China", + "aff_domain": 
"stu.suda.edu.cn;tencent.com; ; ;fudan.edu.cn; ; ", + "email": "stu.suda.edu.cn;tencent.com; ; ;fudan.edu.cn; ; ", + "github": "https://github.com/krystalan/ClidSum", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;2;3+0;1;0", + "aff_unique_norm": "Tencent Inc;Soochow University;Beijing University of Posts and Telecommunications;Fudan University", + "aff_unique_dep": "Pattern Recognition Center, WeChat AI;School of Computer Science and Technology;;School of Computer Science", + "aff_unique_url": "https://www.tencent.com;http://www.soochow.edu.cn;http://www.bupt.edu.cn/;https://www.fudan.edu.cn", + "aff_unique_abbr": "Tencent;;BUPT;Fudan", + "aff_campus_unique_index": "1;2;3;1", + "aff_campus_unique": ";Suzhou;Beijing;Shanghai", + "aff_country_unique_index": "0+0;0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.398", + "title": "ClinicalT5: A Generative Language Model for Clinical Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In the past few years, large pre-trained language models (PLMs) have been widely adopted in different areas and have made fundamental improvements over a variety of downstream tasks in natural language processing (NLP). Meanwhile, domain-specific variants of PLMs are being proposed to address the needs of domains that demonstrate a specific pattern of writing and vocabulary, e.g., BioBERT for the biomedical domain and ClinicalBERT for the clinical domain. Recently, generative language models like BART and T5 are gaining popularity with their competitive performance on text generation as well as on tasks cast as generative problems. However, in the clinical domain, such domain-specific generative variants are still underexplored. To address this need, our work introduces a T5-based text-to-text transformer model pre-trained on clinical text, i.e., ClinicalT5. 
We evaluate the proposed model both intrinsically and extrinsically over a diverse set of tasks across multiple datasets, and show that ClinicalT5 dramatically outperforms T5 in the domain-specific tasks and compares favorably with its close baselines.", + "author": "Qiuhao Lu; Dejing Dou; Thien Nguyen", + "authorids": "/q/qiuhao-lu/; /d/dejing-dou/; /t/thien-nguyen/", + "bibtex": "@inproceedings{lu-etal-2022-clinicalt5,\n title = \"{C}linical{T}5: A Generative Language Model for Clinical Text\",\n author = \"Lu, Qiuhao and\n Dou, Dejing and\n Nguyen, Thien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.398/\",\n doi = \"10.18653/v1/2022.findings-emnlp.398\",\n pages = \"5436--5443\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.398.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.398/", + "pdf_size": 150029, + "gs_citation": 67, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1867226916652122974&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "Dept. of Computer Science, University of Oregon, Eugene, OR, USA+Baidu Research; Dept. of Computer Science, University of Oregon, Eugene, OR, USA; Dept. 
of Computer Science, University of Oregon, Eugene, OR, USA", + "aff_domain": "cs.uoregon.edu;cs.uoregon.edu;cs.uoregon.edu", + "email": "cs.uoregon.edu;cs.uoregon.edu;cs.uoregon.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "University of Oregon;Baidu", + "aff_unique_dep": "Department of Computer Science;Baidu Research", + "aff_unique_url": "https://www.uoregon.edu;https://research.baidu.com", + "aff_unique_abbr": "UO;Baidu", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Eugene;", + "aff_country_unique_index": "0+1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.8", + "title": "Clip-Tuning: Towards Derivative-free Prompt Learning with a Mixture of Rewards", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Derivative-free prompt learning has emerged as a lightweight alternative to prompt tuning, which only requires model inference to optimize the prompts. However, existing work did not take full advantage of the over-parameterized characteristics of large pre-trained language models (PLMs). In this paper, we propose Clip-Tuning, a simple yet effective method that adopts diverse frozen \u201cthinned\u201d networks of PLMs to obtain *a mixture of rewards* and thus advance the derivative-free prompt learning. The thinned networks consist of all the hidden units that survive a stationary dropout strategy, whose inference predictions reflect an ensemble of partial views over prompted training samples. 
Our method outperforms previous gradient-free prompt learning methods and achieves parity with gradient-based counterparts on seven language understanding benchmarks under few-shot settings.", + "author": "Yekun Chai; Shuohuan Wang; Yu Sun; Hao Tian; Hua Wu; Haifeng Wang", + "authorids": "/y/yekun-chai/; /s/shuohuan-wang/; /y/yu-sun/; /h/hao-tian/; /h/hua-wu/; /h/haifeng-wang/", + "bibtex": "@inproceedings{chai-etal-2022-clip,\n title = \"Clip-Tuning: Towards Derivative-free Prompt Learning with a Mixture of Rewards\",\n author = \"Chai, Yekun and\n Wang, Shuohuan and\n Sun, Yu and\n Tian, Hao and\n Wu, Hua and\n Wang, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.8/\",\n doi = \"10.18653/v1/2022.findings-emnlp.8\",\n pages = \"108--117\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.8.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.8/", + "pdf_size": 626379, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7120906947104710207&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Baidu; Baidu; Baidu; Baidu; Baidu; Baidu", + "aff_domain": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "email": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Baidu, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.12", + 
"title": "Co-guiding Net: Achieving Mutual Guidances between Multiple Intent Detection and Slot Filling via Heterogeneous Semantics-Label Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent graph-based models for joint multiple intent detection and slot filling have obtained promising results through modeling the guidance from the prediction of intents to the decoding of slot filling.However, existing methods (1) only model the unidirectional guidance from intent to slot; (2) adopt homogeneous graphs to model the interactions between the slot semantics nodes and intent label nodes, which limit the performance.In this paper, we propose a novel model termed Co-guiding Net, which implements a two-stage framework achieving the mutual guidances between the two tasks.In the first stage, the initial estimated labels of both tasks are produced, and then they are leveraged in the second stage to model the mutual guidances.Specifically, we propose two heterogeneous graph attention networks working on the proposed two heterogeneous semantics-label graphs, which effectively represent the relations among the semantics nodes and label nodes.Experiment results show that our model outperforms existing models by a large margin, obtaining a relative improvement of 19.3% over the previous best model on MixATIS dataset in overall accuracy.", + "author": "Bowen Xing; Ivor Tsang", + "authorids": "/b/bowen-xing/; /i/ivor-tsang/", + "bibtex": "@inproceedings{xing-tsang-2022-co,\n title = \"Co-guiding Net: Achieving Mutual Guidances between Multiple Intent Detection and Slot Filling via Heterogeneous Semantics-Label Graphs\",\n author = \"Xing, Bowen and\n Tsang, Ivor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = 
\"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.12/\",\n doi = \"10.18653/v1/2022.emnlp-main.12\",\n pages = \"159--169\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.12.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.12/", + "pdf_size": 855573, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3045665066766530934&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Australian Artificial Intelligence Institute, University of Technology Sydney, Australia+Centre for Frontier Artificial Intelligence Research, A*STAR, Singapore; Centre for Frontier Artificial Intelligence Research, A*STAR, Singapore+Australian Artificial Intelligence Institute, University of Technology Sydney, Australia", + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1+0", + "aff_unique_norm": "University of Technology Sydney;A*STAR", + "aff_unique_dep": "Australian Artificial Intelligence Institute;Centre for Frontier Artificial Intelligence Research", + "aff_unique_url": "https://www.uts.edu.au;https://www.a-star.edu.sg", + "aff_unique_abbr": "UTS;A*STAR", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Sydney;", + "aff_country_unique_index": "0+1;1+0", + "aff_country_unique": "Australia;Singapore" + }, + { + "id": "2022.emnlp-industry.23", + "title": "CoCoID: Learning Contrastive Representations and Compact Clusters for Semi-Supervised Intent Discovery", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Intent discovery is to mine new intents from user utterances, which are not present in the set of manually predefined intents. Previous approaches to intent discovery usually automatically cluster novel intents with prior knowledge from intent-labeled data in a semi-supervised way. 
In this paper, we focus on the discriminative user utterance representation learning and the compactness of the learned intent clusters. We propose a novel semi-supervised intent discovery framework CoCoID with two essential components: contrastive user utterance representation learning and intra-cluster knowledge distillation. The former attempts to detect similar and dissimilar intents from a minibatch-wise perspective. The latter regularizes the predictive distribution of the model over samples in a cluster-wise way. We conduct experiments on both real-life challenging datasets (i.e., CLINC and BANKING) that are curated to emulate the true environment of commercial/production systems and traditional datasets (i.e., StackOverflow and DBPedia) to evaluate the proposed CoCoID. Experiment results demonstrate that our model substantially outperforms state-of-the-art intent discovery models (12 baselines) by over 1.4 ACC and ARI points and 1.1 NMI points across the four datasets. Further analyses suggest that CoCoID is able to learn contrastive representations and compact clusters for intent discovery.", + "author": "Qian Cao; Deyi Xiong; Qinlong Wang; Xia Peng", + "authorids": "/q/qian-cao/; /d/deyi-xiong/; /q/qinlong-wang/; /x/xia-peng/", + "bibtex": "@inproceedings{cao-etal-2022-cocoid,\n title = \"{C}o{C}o{ID}: Learning Contrastive Representations and Compact Clusters for Semi-Supervised Intent Discovery\",\n author = \"Cao, Qian and\n Xiong, Deyi and\n Wang, Qinlong and\n Peng, Xia\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.23/\",\n doi = \"10.18653/v1/2022.emnlp-industry.23\",\n pages = \"226--236\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-industry.23.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.23/", + "pdf_size": 657621, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:f2uP9GgXV6EJ:scholar.google.com/&scioq=CoCoID:+Learning+Contrastive+Representations+and+Compact+Clusters+for+Semi-Supervised+Intent+Discovery&hl=en&as_sdt=0,33", + "gs_version_total": 0, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.158", + "title": "CoCoa: An Encoder-Decoder Model for Controllable Code-switched Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Code-switching has seen growing interest in recent years as an important multilingual NLP phenomenon. Generating code-switched text for data augmentation has been sufficiently well-explored. However, there is no prior work on generating code-switched text with fine-grained control on the degree of code-switching and the lexical choices used to convey formality. We present CoCoa, an encoder-decoder translation model that converts monolingual Hindi text to Hindi-English code-switched text with both encoder-side and decoder-side interventions to achieve fine-grained controllable generation. CoCoa can be invoked at test-time to synthesize code-switched text that is simultaneously faithful to syntactic and lexical attributes relevant to code-switching. CoCoa outputs were subjected to rigorous subjective and objective evaluations. Human evaluations establish that our outputs are of superior quality while being faithful to desired attributes. We show significantly improved BLEU scores when compared with human-generated code-switched references. 
Compared to competitive baselines, we show 10% reduction in perplexity on a language modeling task and also demonstrate clear improvements on a downstream code-switched sentiment analysis task.", + "author": "Sneha Mondal; Ritika .; Shreya Pathak; Preethi Jyothi; Aravindan Raghuveer", + "authorids": "/s/sneha-mondal/; /r/ritika/; /s/shreya-pathak/; /p/preethi-jyothi/; /a/aravindan-raghuveer/", + "bibtex": "@inproceedings{mondal-etal-2022-cocoa,\n title = \"{C}o{C}oa: An Encoder-Decoder Model for Controllable Code-switched Generation\",\n author = \"Mondal, Sneha and\n ., Ritika and\n Pathak, Shreya and\n Jyothi, Preethi and\n Raghuveer, Aravindan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.158/\",\n doi = \"10.18653/v1/2022.emnlp-main.158\",\n pages = \"2466--2479\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.158.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.158/", + "pdf_size": 1023640, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14422761788403539574&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Google Research; IIT Bombay; IIT Bombay; IIT Bombay; Google Research", + "aff_domain": "google.com;cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in;google.com", + "email": "google.com;cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "Google;Indian Institute of Technology Bombay", + "aff_unique_dep": "Google Research;", + "aff_unique_url": "https://research.google;https://www.iitb.ac.in", + "aff_unique_abbr": "Google Research;IITB", + 
"aff_campus_unique_index": "0;1;1;1;0", + "aff_campus_unique": "Mountain View;Mumbai", + "aff_country_unique_index": "0;1;1;1;0", + "aff_country_unique": "United States;India" + }, + { + "id": "2022.findings-emnlp.449", + "title": "Code Generation From Flowcharts with Texts: A Benchmark Dataset and An Approach", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Currently, researchers focus on generating codes from the requirement documents. However, current approaches still perform poorly on some requirements needing complex problem-solving skills. In reality, to tackle such complex requirements, instead of directly translating requirement documents into codes, software engineers write codes via unified modeling language diagrams, such as flowcharts, an intermediate tool to analyze and visualize the system. Therefore, we propose a new source code generation task, that is, to generate source code from flowcharts with texts. We manually construct a benchmark dataset containing 320 flowcharts with their corresponding source codes. Obviously, it is not straightforward to employ the current approaches for the new source code generation task since (1) the flowchart is a graph that contains various structures, including loop, selection, and others which is different from texts; (2) the connections between nodes in the flowchart are abundant and diverse which need to be carefully handled. To solve the above problems, we propose a two-stage code generation model. In the first stage, a structure recognition algorithm is employed to transform the flowchart into pseudo-code containing the structural conventions of a typical programming language such as while, if. In the second stage, a code generation model is employed to convert the pseudo-code into code. 
Experimental results show that the proposed approach can achieve some improvement over the baselines.", + "author": "Zejie Liu; Xiaoyu Hu; Deyu Zhou; Lin Li; Xu Zhang; Yanzheng Xiang", + "authorids": "/z/zejie-liu/; /x/xiaoyu-hu/; /d/deyu-zhou/; /l/lin-li/; /x/xu-zhang/; /y/yanzheng-xiang/", + "bibtex": "@inproceedings{liu-etal-2022-code,\n title = \"Code Generation From Flowcharts with Texts: A Benchmark Dataset and An Approach\",\n author = \"Liu, Zejie and\n Hu, Xiaoyu and\n Zhou, Deyu and\n Li, Lin and\n Zhang, Xu and\n Xiang, Yanzheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.449/\",\n doi = \"10.18653/v1/2022.findings-emnlp.449\",\n pages = \"6069--6077\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.449.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.449/", + "pdf_size": 321324, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14874856999737145835&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; Nanjing Origin Information Technology Company, China; Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China", + "aff_domain": 
"seu.edu.cn;seu.edu.cn;seu.edu.cn;gmail.com;seu.edu.cn;seu.edu.cn", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;gmail.com;seu.edu.cn;seu.edu.cn", + "github": "https://github.com/LiuZeJie97/Code-Generation-From-Flowcharts-with-Texts-A-Benchmark-Dataset-and-An-Approach", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Southeast University;Nanjing Origin Information Technology Company", + "aff_unique_dep": "Key Laboratory of Computer Network and Information Integration;", + "aff_unique_url": "https://www.seu.edu.cn/;", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.459", + "title": "Code Vulnerability Detection via Nearest Neighbor Mechanism", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Code vulnerability detection is a fundamental and challenging task in the software security field. Existing research works aim to learn semantic information from the source code by utilizing NLP technologies. However, in vulnerability detection tasks, some vulnerable samples are very similar to non-vulnerable samples, which are difficult to identify. To address this issue and improve detection performance, we introduce the k-nearest neighbor mechanism which retrieves multiple neighbor samples and utilizes label information of retrieved neighbor samples to provide help for model predictions. Besides, we use supervised contrastive learning to make the model learn the discriminative representation and ensure that label information of retrieved neighbor samples is as consistent as possible with the label information of testing samples. 
Extensive experiments show that our method can achieve obvious performance improvements compared to baseline models.", + "author": "Qianjin Du; Xiaohui Kuang; Gang Zhao", + "authorids": "/q/qianjin-du/; /x/xiaohui-kuang/; /g/gang-zhao/", + "bibtex": "@inproceedings{du-etal-2022-code,\n title = \"Code Vulnerability Detection via Nearest Neighbor Mechanism\",\n author = \"Du, Qianjin and\n Kuang, Xiaohui and\n Zhao, Gang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.459/\",\n doi = \"10.18653/v1/2022.findings-emnlp.459\",\n pages = \"6173--6178\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.459.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.459/", + "pdf_size": 422964, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15876865830797137793&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, Tsinghua University; National Key Laboratory of Science and Technology on Information System Security; National Key Laboratory of Science and Technology on Information System Security", + "aff_domain": "mails.tsinghua.edu.cn;bupt.edu.cn;163.com", + "email": "mails.tsinghua.edu.cn;bupt.edu.cn;163.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Tsinghua University;National Key Laboratory of Science and Technology on Information System Security", + "aff_unique_dep": "Department of Computer Science and Technology;", + "aff_unique_url": "https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "THU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.174", + "title": "CodeExp: Explanatory Code Document Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Developing models that can automatically generate detailed code explanation can greatly benefit software maintenance and programming education. However, existing code-to-text generation models often produce only high-level summaries of code that do not capture implementation-level choices essential for these scenarios. To fill in this gap, we propose the code explanation generation task. We first conducted a human study to identify the criteria for high-quality explanatory docstring for code. Based on that, we collected and refined a large-scale code docstring corpus and formulated automatic evaluation metrics that best match human assessments. Finally, we present a multi-stage fine-tuning strategy and baseline models for the task. Our experiments show that (1) our refined training dataset lets models achieve better performance in the explanation generation tasks compared to larger-scale unrefined data (15x larger), and (2) fine-tuned models can generate well-structured long docstrings comparable to human-written ones. We envision our training dataset, human-evaluation protocol, recommended metrics, and fine-tuning strategy can boost future code explanation research. 
The code and annotated data are available at https://github.com/subercui/CodeExp.", + "author": "Haotian Cui; Chenglong Wang; Junjie Huang; Jeevana Priya Inala; Todd Mytkowicz; Bo Wang; Jianfeng Gao; Nan Duan", + "authorids": "/h/haotian-cui/; /c/chenglong-wang/; /j/junjie-huang/; /j/jeevana-priya-inala/; /t/todd-mytkowicz/; /b/bo-wang/; /j/jianfeng-gao/; /n/nan-duan/", + "bibtex": "@inproceedings{cui-etal-2022-codeexp,\n title = \"{C}ode{E}xp: Explanatory Code Document Generation\",\n author = \"Cui, Haotian and\n Wang, Chenglong and\n Huang, Junjie and\n Inala, Jeevana Priya and\n Mytkowicz, Todd and\n Wang, Bo and\n Gao, Jianfeng and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.174/\",\n doi = \"10.18653/v1/2022.findings-emnlp.174\",\n pages = \"2342--2354\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.174.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.174/", + "pdf_size": 789096, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10051637475889562315&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of Toronto; Microsoft Research; Beihang University; Microsoft Research; Microsoft Research; University of Toronto; Microsoft Research; Microsoft Research", + "aff_domain": "mail.utoronto.ca;microsoft.com;buaa.edu.cn;microsoft.com;microsoft.com;mail.utoronto.ca;microsoft.com;microsoft.com", + "email": "mail.utoronto.ca;microsoft.com;buaa.edu.cn;microsoft.com;microsoft.com;mail.utoronto.ca;microsoft.com;microsoft.com", + "github": "https://github.com/subercui/CodeExp", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;1;1;0;1;1", + 
"aff_unique_norm": "University of Toronto;Microsoft Corporation;Beihang University", + "aff_unique_dep": ";Microsoft Research;", + "aff_unique_url": "https://www.utoronto.ca;https://www.microsoft.com/en-us/research;http://www.buaa.edu.cn/", + "aff_unique_abbr": "U of T;MSR;BUAA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;1;1;0;1;1", + "aff_country_unique": "Canada;United States;China" + }, + { + "id": "2022.emnlp-main.187", + "title": "CodeRetriever: A Large Scale Contrastive Pre-Training Method for Code Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we propose the CodeRetriever model, which learns the function-level code semantic representations through large-scale code-text contrastive pre-training. We adopt two contrastive learning schemes in CodeRetriever: unimodal contrastive learning and bimodal contrastive learning. For unimodal contrastive learning, we design an unsupervised learning approach to build semantic-related code pairs based on the documentation and function name. For bimodal contrastive learning, we leverage the documentation and in-line comments of code to build code-text pairs. Both contrastive objectives can fully leverage large-scale code corpus for pre-training. 
Extensive experimental results show that CodeRetriever achieves new state-of-the-art with significant improvement over existing code pre-trained models, on eleven domain/language-specific code search tasks with six programming languages in different code granularity (function-level, snippet-level and statement-level).These results demonstrate the effectiveness and robustness of CodeRetriever.The codes and resources are available at https://github.com/microsoft/AR2/tree/main/CodeRetriever.", + "author": "Xiaonan Li; Yeyun Gong; Yelong Shen; Xipeng Qiu; Hang Zhang; Bolun Yao; Weizhen Qi; Daxin Jiang; Weizhu Chen; Nan Duan", + "authorids": "/x/xiaonan-li/; /y/yeyun-gong/; /y/yelong-shen/; /x/xipeng-qiu/; /h/hang-zhang/; /b/bolun-yao/; /w/weizhen-qi/; /d/daxin-jiang/; /w/weizhu-chen/; /n/nan-duan/", + "bibtex": "@inproceedings{li-etal-2022-coderetriever,\n title = \"{C}ode{R}etriever: A Large Scale Contrastive Pre-Training Method for Code Search\",\n author = \"Li, Xiaonan and\n Gong, Yeyun and\n Shen, Yelong and\n Qiu, Xipeng and\n Zhang, Hang and\n Yao, Bolun and\n Qi, Weizhen and\n Jiang, Daxin and\n Chen, Weizhu and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.187/\",\n doi = \"10.18653/v1/2022.emnlp-main.187\",\n pages = \"2898--2910\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.187.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.187/", + "pdf_size": 826007, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9006054362978763224&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Shanghai Key Laboratory of Intelligent Information Processing, Fudan 
University+School of Computer Science, Fudan University; Microsoft; Microsoft; Shanghai Key Laboratory of Intelligent Information Processing, Fudan University+School of Computer Science, Fudan University; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft", + "aff_domain": "fudan.edu.cn;microsoft.com;microsoft.com;fudan.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "fudan.edu.cn;microsoft.com;microsoft.com;fudan.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/AR2/tree/main/CodeRetriever", + "project": "", + "author_num": 10, + "aff_unique_index": "0+0;1;1;0+0;1;1;1;1;1;1", + "aff_unique_norm": "Fudan University;Microsoft Corporation", + "aff_unique_dep": "Shanghai Key Laboratory of Intelligent Information Processing;", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.microsoft.com", + "aff_unique_abbr": "Fudan;Microsoft", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;1;1;0+0;1;1;1;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.442", + "title": "Collaborative Reasoning on Multi-Modal Semantic Graphs for Video-Grounded Dialogue Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We study video-grounded dialogue generation, where a response is generated based on the dialogue context and the associated video. The primary challenges of this task lie in (1) the difficulty of integrating video data into pre-trained language models (PLMs) which presents obstacles to exploiting the power of large-scale pre-training; and (2) the necessity of taking into account the complementarity of various modalities throughout the reasoning process. 
Although having made remarkable progress in video-grounded dialogue generation, existing methods still fall short when it comes to integrating with PLMs in a way that allows information from different modalities to complement each other. To alleviate these issues, we first propose extracting pertinent information from videos and turning it into reasoning paths that are acceptable to PLMs. Additionally, we propose a multi-agent reinforcement learning method to collaboratively perform reasoning on different modalities (i.e., video and dialogue context). Empirical experiment results on two public datasets indicate that the proposed model can significantly outperform state-of-the-art models by large margins on both automatic and human evaluations.", + "author": "Xueliang Zhao; Yuxuan Wang; Chongyang Tao; Chenshuo Wang; Dongyan Zhao", + "authorids": "/x/xueliang-zhao/; /y/yuxuan-wang/; /c/chongyang-tao/; /c/chenshuo-wang/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{zhao-etal-2022-collaborative,\n title = \"Collaborative Reasoning on Multi-Modal Semantic Graphs for Video-Grounded Dialogue Generation\",\n author = \"Zhao, Xueliang and\n Wang, Yuxuan and\n Tao, Chongyang and\n Wang, Chenshuo and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.442/\",\n doi = \"10.18653/v1/2022.findings-emnlp.442\",\n pages = \"5988--5998\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.442.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.442/", + "pdf_size": 487590, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4951319016674407955&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": 
"Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, AAIS, Peking University+Beijing Institute for General Artificial Intelligence; Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, AAIS, Peking University+Beijing Institute for General Artificial Intelligence; Wangxuan Institute of Computer Technology, Peking University; Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, AAIS, Peking University; Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, AAIS, Peking University+Beijing Institute for General Artificial Intelligence", + "aff_domain": "pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+1;0+0+1;0;0+0;0+0+1", + "aff_unique_norm": "Peking University;Beijing Institute for General Artificial Intelligence", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.bigaiai.org/", + "aff_unique_abbr": "PKU;BIGAI", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.120", + "title": "ComFact: A Benchmark for Linking Contextual Commonsense Knowledge", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Understanding rich narratives, such as dialogues and stories, often requires natural language processing systems to access relevant knowledge from commonsense knowledge graphs. 
However, these systems typically retrieve facts from KGs using simple heuristics that disregard the complex challenges of identifying situationally-relevant commonsense knowledge (e.g., contextualization, implicitness, ambiguity).In this work, we propose the new task of commonsense fact linking, where models are given contexts and trained to identify situationally-relevant commonsense knowledge from KGs. Our novel benchmark, ComFact, contains ~293k in-context relevance annotations for commonsense triplets across four stylistically diverse dialogue and storytelling datasets. Experimental results confirm that heuristic fact linking approaches are imprecise knowledge extractors. Learned fact linking models demonstrate across-the-board performance improvements (~34.6% F1) over these heuristics. Furthermore, improved knowledge retrieval yielded average downstream improvements of 9.8% for a dialogue response generation task. However, fact linking models still significantly underperform humans, suggesting our benchmark is a promising testbed for research in commonsense augmentation of NLP systems.", + "author": "Silin Gao; Jena D. Hwang; Saya Kanno; Hiromi Wakaki; Yuki Mitsufuji; Antoine Bosselut", + "authorids": "/s/silin-gao/; /j/jena-d-hwang/; /s/saya-kanno/; /h/hiromi-wakaki/; /y/yuki-mitsufuji/; /a/antoine-bosselut/", + "bibtex": "@inproceedings{gao-etal-2022-comfact,\n title = \"{C}om{F}act: A Benchmark for Linking Contextual Commonsense Knowledge\",\n author = \"Gao, Silin and\n Hwang, Jena D. 
and\n Kanno, Saya and\n Wakaki, Hiromi and\n Mitsufuji, Yuki and\n Bosselut, Antoine\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.120/\",\n doi = \"10.18653/v1/2022.findings-emnlp.120\",\n pages = \"1656--1675\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.120.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.120/", + "pdf_size": 402984, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16216616369868570272&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "NLP Lab, IC, EPFL, Switzerland; Allen Institute for AI, WA, USA; Sony Group Corporation, Tokyo, Japan; Sony Group Corporation, Tokyo, Japan; Sony Group Corporation, Tokyo, Japan; NLP Lab, IC, EPFL, Switzerland", + "aff_domain": "epfl.ch;allenai.org;sony.com;sony.com;sony.com;epfl.ch", + "email": "epfl.ch;allenai.org;sony.com;sony.com;sony.com;epfl.ch", + "github": "https://github.com/Silin159/ComFact", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;2;2;0", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;Allen Institute for AI;Sony Group Corporation", + "aff_unique_dep": "NLP Lab;AI Research;", + "aff_unique_url": "https://www.epfl.ch;https://allenai.org;https://www.sony.com", + "aff_unique_abbr": "EPFL;AI2;Sony", + "aff_campus_unique_index": "1;2;2;2", + "aff_campus_unique": ";Seattle;Tokyo", + "aff_country_unique_index": "0;1;2;2;2;0", + "aff_country_unique": "Switzerland;United States;Japan" + }, + { + "id": "2022.findings-emnlp.424", + "title": "Combinatory Grammar Tells Underlying Relevance among Entities", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Relation extraction (RE) is an important task in natural language processing which aims to annotate the relation between two given entities, which requires a deep understanding of the running text. To import model performance, existing approaches leverage syntactic information to facilitate the relation extraction process, where they mainly focus on dependencies among words while paying limited attention to other types of syntactic structure. Considering that combinatory categorial grammar (CCG) is a lexicalized grammatical formalism that carries the syntactic and semantic knowledge for text understanding, we propose an alternative solution for RE that takes advantage of CCG to detect the relation between entities. In doing so, we perform a multi-task learning process to learn from RE and auto-annotated CCG supertags, where an attention mechanism is performed over all input words to distinguish the important ones for RE with the attention weights guided by the supertag decoding process. 
We evaluate our model on two widely used English benchmark datasets (i.e., ACE2005EN and SemEval 2010 Task 8 datasets) for RE, where the effectiveness of our approach is demonstrated by the experimental results with our approach achieving state-of-the-art performance on both datasets.", + "author": "Yuanhe Tian; Yan Song", + "authorids": "/y/yuanhe-tian/; /y/yan-song/", + "bibtex": "@inproceedings{tian-song-2022-combinatory,\n title = \"Combinatory Grammar Tells Underlying Relevance among Entities\",\n author = \"Tian, Yuanhe and\n Song, Yan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.424/\",\n doi = \"10.18653/v1/2022.findings-emnlp.424\",\n pages = \"5780--5786\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.424.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.424/", + "pdf_size": 436363, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14320244521355854215&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "University of Science and Technology of China+University of Washington; University of Science and Technology of China", + "aff_domain": "uw.edu;gmail.com", + "email": "uw.edu;gmail.com", + "github": "https://github.com/synlp/RE-CCG", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0", + "aff_unique_norm": "University of Science and Technology of China;University of Washington", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.washington.edu", + "aff_unique_abbr": "USTC;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0", + "aff_country_unique": "China;United States" + }, + 
{ + "id": "2022.findings-emnlp.2", + "title": "Commonsense Knowledge Salience Evaluation with a Benchmark Dataset in E-commerce", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In e-commerce, the salience of commonsense knowledge (CSK) is beneficial for widespread applications such as product search and recommendation. For example, when users search for \u201crunning\u201d in e-commerce, they would like to find products highly related to running, such as \u201crunning shoes\u201d rather than \u201cshoes\u201d. Nevertheless, many existing CSK collections rank statements solely by confidence scores, and there is no information about which ones are salient from a human perspective. In this work, we define the task of supervised salience evaluation, where given a CSK triple, the model is required to learn whether the triple is salient or not. In addition to formulating the new task, we also release a new Benchmark dataset of Salience Evaluation in E-commerce (BSEE) and hope to promote related research on commonsense knowledge salience evaluation. We conduct experiments in the dataset with several representative baseline models. The experimental results show that salience evaluation is a hard task where models perform poorly on our evaluation set. We further propose a simple but effective approach, PMI-tuning, which shows promise for solving this novel problem. 
Code is available in https://github.com/OpenBGBenchmark/OpenBG-CSK.", + "author": "Yincen Qu; Ningyu Zhang; Hui Chen; Zelin Dai; Chengming Wang; Xiaoyu Wang; Qiang Chen; Huajun Chen", + "authorids": "/y/yincen-qu/; /n/ningyu-zhang/; /h/hui-chen/; /z/zelin-dai/; /c/chengming-wang/; /x/xiaoyu-wang/; /q/qiang-chen/; /h/huajun-chen/", + "bibtex": "@inproceedings{qu-etal-2022-commonsense,\n title = \"Commonsense Knowledge Salience Evaluation with a Benchmark Dataset in {E}-commerce\",\n author = \"Qu, Yincen and\n Zhang, Ningyu and\n Chen, Hui and\n Dai, Zelin and\n Wang, Chengming and\n Wang, Xiaoyu and\n Chen, Qiang and\n Chen, Huajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.2/\",\n doi = \"10.18653/v1/2022.findings-emnlp.2\",\n pages = \"14--27\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.2.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.2/", + "pdf_size": 1281211, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11528771259882248155&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 7, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/OpenBGBenchmark/OpenBG-CSK", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.546", + "title": "Communication breakdown: On the low mutual intelligibility between human and neural captioning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We compare the 0-shot performance of a neural caption-based image retriever when given as input either human-produced captions or captions generated by a neural captioner. 
We conduct this comparison on the recently introduced ImageCoDe data-set (Krojer et al. 2022), which contains hard distractors nearly identical to the images to be retrieved. We find that the neural retriever has much higher performance when fed neural rather than human captions, despite the fact that the former, unlike the latter, were generated without awareness of the distractors that make the task hard. Even more remarkably, when the same neural captions are given to human subjects, their retrieval performance is almost at chance level. Our results thus add to the growing body of evidence that, even when the \u201clanguage\u201d of neural models resembles English, this superficial resemblance might be deeply misleading.", + "author": "Roberto Dess\u00ec; Eleonora Gualdoni; Francesca Franzon; Gemma Boleda; Marco Baroni", + "authorids": "/r/roberto-dessi/; /e/eleonora-gualdoni/; /f/francesca-franzon/; /g/gemma-boleda/; /m/marco-baroni/", + "bibtex": "@inproceedings{dessi-etal-2022-communication,\n title = \"Communication breakdown: On the low mutual intelligibility between human and neural captioning\",\n author = \"Dess{\\`i}, Roberto and\n Gualdoni, Eleonora and\n Franzon, Francesca and\n Boleda, Gemma and\n Baroni, Marco\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.546/\",\n doi = \"10.18653/v1/2022.emnlp-main.546\",\n pages = \"7998--8007\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.546.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.546/", + "pdf_size": 8961269, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17459817030036134036&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 3, + "aff": "Meta AI + Universitat Pompeu Fabra; Universitat Pompeu Fabra; Universitat Pompeu Fabra; ICREA + Universitat Pompeu Fabra; ICREA + Universitat Pompeu Fabra", + "aff_domain": "meta.com;upf.edu;upf.edu;upf.edu;upf.edu", + "email": "meta.com;upf.edu;upf.edu;upf.edu;upf.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;2+1;2+1", + "aff_unique_norm": "Meta Platforms, Inc.;Universitat Pompeu Fabra;Instituci\u00f3 Catalana de Recerca i Estudis Avan\u00e7ats", + "aff_unique_dep": "Meta AI;;", + "aff_unique_url": "https://meta.com;https://www.upf.edu/;https://www.icrea.cat", + "aff_unique_abbr": "Meta;UPF;ICREA", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1;1+1;1+1", + "aff_country_unique": "United States;Spain" + }, + { + "id": "2022.emnlp-main.330", + "title": "Competency-Aware Neural Machine Translation: Can Machine Translation Know its Own Translation Quality?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Neural machine translation (NMT) is often criticized for failures that happenwithout awareness. The lack of competency awareness makes NMT untrustworthy. This is in sharp contrast to human translators who give feedback or conduct further investigations whenever they are in doubt about predictions. To fill this gap, we propose a novel competency-aware NMT by extending conventional NMT with a self-estimator, offering abilities to translate a source sentence and estimate its competency.The self-estimator encodes the information of the decoding procedure and then examines whether it can reconstruct the original semantics of the source sentence. 
Experimental results on four translation tasks demonstrate that the proposed method not only carries out translation tasks intact but also delivers outstanding performance on quality estimation.Without depending on any reference or annotated data typically required by state-of-the-art metric and quality estimation methods, our model yields an even higher correlation with human quality judgments than a variety of aforementioned methods, such as BLEURT, COMET, and BERTScore. Quantitative and qualitative analyses show better robustness of competency awareness in our model.", + "author": "Pei Zhang; Baosong Yang; Hao-Ran Wei; Dayiheng Liu; Kai Fan; Luo Si; Jun Xie", + "authorids": "/p/pei-zhang/; /b/baosong-yang/; /h/hao-ran-wei/; /d/dayiheng-liu/; /k/kai-fan/; /l/luo-si/; /j/jun-xie/", + "bibtex": "@inproceedings{zhang-etal-2022-competency,\n title = \"Competency-Aware Neural Machine Translation: Can Machine Translation Know its Own Translation Quality?\",\n author = \"Zhang, Pei and\n Yang, Baosong and\n Wei, Hao-Ran and\n Liu, Dayiheng and\n Fan, Kai and\n Si, Luo and\n Xie, Jun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.330/\",\n doi = \"10.18653/v1/2022.emnlp-main.330\",\n pages = \"4959--4970\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.330.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.330/", + "pdf_size": 2251058, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7627018603034194969&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Alibaba Group Inc.; Alibaba Group Inc.; Alibaba Group Inc.; Alibaba Group Inc.; Alibaba Group Inc.; Alibaba Group Inc.; Alibaba 
Group Inc.", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ; ;", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ; ;", + "github": "https://github.com/xiaoyi0814/CANMT", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.349", + "title": "Complex Hyperbolic Knowledge Graph Embeddings with Fast Fourier Transform", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The choice of geometric space for knowledge graph (KG) embeddings can have significant effects on the performance of KG completion tasks. The hyperbolic geometry has been shown to capture the hierarchical patterns due to its tree-like metrics, which addressed the limitations of the Euclidean embedding models. Recent explorations of the complex hyperbolic geometry further improved the hyperbolic embeddings for capturing a variety of hierarchical structures. However, the performance of the hyperbolic KG embedding models for non-transitive relations is still unpromising, while the complex hyperbolic embeddings do not deal with multi-relations. This paper aims to utilize the representation capacity of the complex hyperbolic geometry in multi-relational KG embeddings. To apply the geometric transformations which account for different relations and the attention mechanism in the complex hyperbolic space, we propose to use the fast Fourier transform (FFT) as the conversion between the real and complex hyperbolic space. 
Constructing the attention-based transformations in the complex space is very challenging, while the proposed Fourier transform-based complex hyperbolic approaches provide a simple and effective solution. Experimental results show that our methods outperform the baselines, including the Euclidean and the real hyperbolic embedding models.", + "author": "Huiru Xiao; Xin Liu; Yangqiu Song; Ginny Wong; Simon See", + "authorids": "/h/huiru-xiao/; /x/xin-liu/; /y/yangqiu-song/; /g/ginny-wong/; /s/simon-see/", + "bibtex": "@inproceedings{xiao-etal-2022-complex,\n title = \"Complex Hyperbolic Knowledge Graph Embeddings with Fast {F}ourier Transform\",\n author = \"Xiao, Huiru and\n Liu, Xin and\n Song, Yangqiu and\n Wong, Ginny and\n See, Simon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.349/\",\n doi = \"10.18653/v1/2022.emnlp-main.349\",\n pages = \"5228--5239\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.349.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.349/", + "pdf_size": 1462662, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14274618926206682019&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 3, + "aff": "Hong Kong University of Science and Technology, Hong Kong SAR; Hong Kong University of Science and Technology, Hong Kong SAR; Hong Kong University of Science and Technology, Hong Kong SAR; NVIDIA AI Technology Center (NV ATIC), NVIDIA, Santa Clara, USA; NVIDIA AI Technology Center (NV ATIC), NVIDIA, Santa Clara, USA", + "aff_domain": "cse.ust.hk;cse.ust.hk;cse.ust.hk;nvidia.com;nvidia.com", + "email": "cse.ust.hk;cse.ust.hk;cse.ust.hk;nvidia.com;nvidia.com", + "github": 
"", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;1", + "aff_unique_norm": "Hong Kong University of Science and Technology;NVIDIA", + "aff_unique_dep": ";NVIDIA AI Technology Center (NV ATIC)", + "aff_unique_url": "https://www.ust.hk;https://www.nvidia.com", + "aff_unique_abbr": "HKUST;NVIDIA", + "aff_campus_unique_index": "0;0;0;1;1", + "aff_campus_unique": "Hong Kong;Santa Clara", + "aff_country_unique_index": "0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.486", + "title": "Composing Ci with Reinforced Non-autoregressive Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Composing Ci (also widely known as Song Ci), a special type of classical Chinese poetry, requires to follow particular format once their tune patterns are given. To automatically generate a well-formed Ci, text generation systems should strictly take into account pre-defined rigid formats (e.g., length and rhyme). Yet, most existing approaches regard Ci generation as a conventional sequence-to-sequence task and use autoregressive models, while it is challenging for such models to properly handle the constraints (according to tune patterns) of Ci during the generation process. Moreover, consider that with the format prepared, Ci generation can be operated by an efficient synchronous process, where autoregressive models are limited in doing so since they follow the character-by-character generation protocol. Therefore, in this paper, we propose to compose Ci through a non-autoregressive approach, which not only ensure that the generation process accommodates tune patterns by controlling the rhythm and essential meaning of each sentence, but also allow the model to perform synchronous generation. 
In addition, we further improve our approach by applying reinforcement learning to the generation process with the rigid constraints of Ci as well as the diversity in content serving as rewards, so as to further maintain the format and content requirement. Experiments on a collected Ci dataset confirm that our proposed approach outperforms strong baselines and previous studies in terms of both automatic evaluation metrics and human judgements.", + "author": "Yan Song", + "authorids": "/y/yan-song/", + "bibtex": "@inproceedings{song-2022-composing,\n title = \"Composing Ci with Reinforced Non-autoregressive Text Generation\",\n author = \"Song, Yan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.486/\",\n doi = \"10.18653/v1/2022.emnlp-main.486\",\n pages = \"7219--7229\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.486.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.486/", + "pdf_size": 1351792, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7403095170050834545&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of Science and Technology of China", + "aff_domain": "gmail.com", + "email": "gmail.com", + "github": "https://github.com/synlp/CiGen", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Science and Technology of China", + "aff_unique_dep": "", + "aff_unique_url": "http://www.ustc.edu.cn", + "aff_unique_abbr": "USTC", + "aff_country_unique_index": "0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.428", + "title": "Composition, Attention, or Both?", + "track": "main", + "status": 
"finding", + "award": false, + "abstract": "In this paper, we propose a novel architecture called Composition Attention Grammars (CAGs) that recursively compose subtrees into a single vector representation with a composition function, and selectively attend to previous structural information with a self-attention mechanism. We investigate whether these components\u2014the composition function and the self-attention mechanism\u2014can both induce human-like syntactic generalization. Specifically, we train language models (LMs) with and without these two components with the model sizes carefully controlled, and evaluate their syntactic generalization performance against six test circuits on the SyntaxGym benchmark. The results demonstrated that the composition function and the self-attention mechanism both play an important role to make LMs more human-like, and closer inspection of linguistic phenomenon implied that the composition function allowed syntactic features, but not semantic features, to percolate into subtree representations.", + "author": "Ryo Yoshida; Yohei Oseki", + "authorids": "/r/ryo-yoshida/; /y/yohei-oseki/", + "bibtex": "@inproceedings{yoshida-oseki-2022-composition,\n title = \"Composition, Attention, or Both?\",\n author = \"Yoshida, Ryo and\n Oseki, Yohei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.428/\",\n doi = \"10.18653/v1/2022.findings-emnlp.428\",\n pages = \"5822--5834\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.428.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.428/", + "pdf_size": 474564, + "gs_citation": 2, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=15510457939604743750&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "The University of Tokyo; The University of Tokyo", + "aff_domain": "g.ecc.u-tokyo.ac.jp;g.ecc.u-tokyo.ac.jp", + "email": "g.ecc.u-tokyo.ac.jp;g.ecc.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Tokyo", + "aff_unique_dep": "", + "aff_unique_url": "https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "UTokyo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.findings-emnlp.463", + "title": "Con-NAT: Contrastive Non-autoregressive Neural Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Inspired by the success of contrastive learning in natural language processing, we incorporate contrastive learning into the conditional masked language model which is extensively used in non-autoregressive neural machine translation (NAT). Accordingly, we propose a Contrastive Non-autoregressive Neural Machine Translation (Con-NAT) model. Con-NAT optimizes the similarity of several different representations of the same token in the same sentence. We propose two methods to obtain various representations: Contrastive Common Mask and Contrastive Dropout. Positive pairs are various different representations of the same token, while negative pairs are representations of different tokens. In the feature space, the model with contrastive loss pulls positive pairs together and pushes negative pairs away. We conduct extensive experiments on six translation directions with different data sizes. The results demonstrate that Con-NAT showed a consistent and significant improvement in fully and iterative NAT. 
Con-NAT is state-of-the-art on WMT\u201916 Ro-En (34.18 BLEU).", + "author": "Hao Cheng; Zhihua Zhang", + "authorids": "/h/hao-cheng/; /z/zhihua-zhang/", + "bibtex": "@inproceedings{cheng-zhang-2022-con,\n title = \"Con-{NAT}: Contrastive Non-autoregressive Neural Machine Translation\",\n author = \"Cheng, Hao and\n Zhang, Zhihua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.463/\",\n doi = \"10.18653/v1/2022.findings-emnlp.463\",\n pages = \"6219--6231\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.463.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.463/", + "pdf_size": 516695, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6711446913363571174&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Academy for Advanced Interdisciplinary Studies, Peking University; School of Mathematical Sciences, Peking University", + "aff_domain": "pku.edu.cn;math.pku.edu.cn", + "email": "pku.edu.cn;math.pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Academy for Advanced Interdisciplinary Studies", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.483", + "title": "ConGen: Unsupervised Control and Generalization Distillation For Sentence Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Sentence representations are essential in many NLP tasks 
operating at the sentence level.Recently, research attention has shifted towards learning how to represent sentences without any annotations, i.e., unsupervised representation learning. Despite the benefit of training without supervised data, there is still a performance penalty compared to supervised methods.Furthermore, the supervised-unsupervised performance gap widens as we reduce the model size. In this paper, we propose an unsupervised sentence representation method to reduce the supervised-unsupervised performance gap, especially for smaller models. Utilizing the concept for knowledge distillation, we derive a distillation framework comprising two training objectives, control and generalize, called ConGen. Experiments on semantic textual similarity (STS), text classification (transfer), and natural language inference (NLI) tasks show that ConGen is on par with supervised training even on smaller models.Furthermore, our method consistently outperformed competitors on multilingual STS.The code and models are available at https://github.com/KornWtp/ConGen.", + "author": "Peerat Limkonchotiwat; Wuttikorn Ponwitayarat; Lalita Lowphansirikul; Can Udomcharoenchaikit; Ekapol Chuangsuwanich; Sarana Nutanong", + "authorids": "/p/peerat-limkonchotiwat/; /w/wuttikorn-ponwitayarat/; /l/lalita-lowphansirikul/; /c/can-udomcharoenchaikit/; /e/ekapol-chuangsuwanich/; /s/sarana-nutanong/", + "bibtex": "@inproceedings{limkonchotiwat-etal-2022-congen,\n title = \"{C}on{G}en: Unsupervised Control and Generalization Distillation For Sentence Representation\",\n author = \"Limkonchotiwat, Peerat and\n Ponwitayarat, Wuttikorn and\n Lowphansirikul, Lalita and\n Udomcharoenchaikit, Can and\n Chuangsuwanich, Ekapol and\n Nutanong, Sarana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n 
publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.483/\",\n doi = \"10.18653/v1/2022.findings-emnlp.483\",\n pages = \"6467--6480\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.483.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.483/", + "pdf_size": 644117, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16571609232047051254&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Information Science and Technology, VISTEC, Thailand; School of Information Science and Technology, VISTEC, Thailand; School of Information Science and Technology, VISTEC, Thailand; School of Information Science and Technology, VISTEC, Thailand; Department of Computer Engineering, Chulalongkorn University, Thailand; School of Information Science and Technology, VISTEC, Thailand", + "aff_domain": "vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;cp.eng.chula.ac.th;vistec.ac.th", + "email": "vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;cp.eng.chula.ac.th;vistec.ac.th", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "VISTEC;Chulalongkorn University", + "aff_unique_dep": "School of Information Science and Technology;Department of Computer Engineering", + "aff_unique_url": "https://www.vistec.ac.th;http://www.chula.ac.th", + "aff_unique_abbr": "VISTEC;Chula", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Bangkok", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Thailand" + }, + { + "id": "2022.emnlp-main.577", + "title": "ConNER: Consistency Training for Cross-lingual Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Cross-lingual named entity recognition (NER) suffers from data scarcity in the target languages, especially under zero-shot settings. 
Existing translate-train or knowledge distillation methods attempt to bridge the language gap, but often introduce a high level of noise. To solve this problem, consistency training methods regularize the model to be robust towards perturbations on data or hidden states.However, such methods are likely to violate the consistency hypothesis, or mainly focus on coarse-grain consistency.We propose ConNER as a novel consistency training framework for cross-lingual NER, which comprises of: (1) translation-based consistency training on unlabeled target-language data, and (2) dropout-based consistency training on labeled source-language data. ConNER effectively leverages unlabeled target-language data and alleviates overfitting on the source language to enhance the cross-lingual adaptability. Experimental results show our ConNER achieves consistent improvement over various baseline methods.", + "author": "Ran Zhou; Xin Li; Lidong Bing; Erik Cambria; Luo Si; Chunyan Miao", + "authorids": "/r/ran-zhou/; /x/xin-li/; /l/lidong-bing/; /e/erik-cambria/; /l/luo-si/; /c/chunyan-miao/", + "bibtex": "@inproceedings{zhou-etal-2022-conner,\n title = \"{C}on{NER}: Consistency Training for Cross-lingual Named Entity Recognition\",\n author = \"Zhou, Ran and\n Li, Xin and\n Bing, Lidong and\n Cambria, Erik and\n Si, Luo and\n Miao, Chunyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.577/\",\n doi = \"10.18653/v1/2022.emnlp-main.577\",\n pages = \"8438--8449\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.577.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.577/", + "pdf_size": 512206, + "gs_citation": 22, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=2799590763500950176&as_sdt=20005&sciodt=0,9&hl=en", + "gs_version_total": 10, + "aff": "DAMO Academy, Alibaba Group+Joint Ph.D. Program between Alibaba and Nanyang Technological University; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; Nanyang Technological University, Singapore; DAMO Academy, Alibaba Group; Nanyang Technological University, Singapore", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;ntu.edu.sg;alibaba-inc.com;ntu.edu.sg", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;ntu.edu.sg;alibaba-inc.com;ntu.edu.sg", + "github": "https://github.com/RandyZhouRan/ConNER", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;1;0;1", + "aff_unique_norm": "Alibaba Group;Nanyang Technological University", + "aff_unique_dep": "DAMO Academy;Joint Ph.D. Program", + "aff_unique_url": "https://www.alibaba-group.com;https://www.ntu.edu.sg", + "aff_unique_abbr": "Alibaba;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;1;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.166", + "title": "ConReader: Exploring Implicit Relations in Contracts for Contract Clause Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study automatic Contract Clause Extraction (CCE) by modeling implicit relations in legal contracts. Existing CCE methods mostly treat contracts as plain text, creating a substantial barrier to understanding contracts of high complexity. 
In this work, we first comprehensively analyze the complexity issues of contracts and distill out three implicit relations commonly found in contracts, namely, 1) Long-range Context Relation that captures the correlations of distant clauses; 2) Term-Definition Relation that captures the relation between important terms with their corresponding definitions, and 3) Similar Clause Relation that captures the similarities between clauses of the same type. Then we propose a novel framework ConReader to exploit the above three relations for better contract understanding and improving CCE. Experimental results show that ConReader makes the prediction more interpretable and achieves new state-of-the-art on two CCE tasks in both conventional and zero-shot settings.", + "author": "Weiwen Xu; Yang Deng; Wenqiang Lei; Wenlong Zhao; Tat-Seng Chua; Wai Lam", + "authorids": "/w/weiwen-xu/; /y/yang-deng/; /w/wenqiang-lei/; /w/wenlong-zhao/; /t/tat-seng-chua/; /w/wai-lam/", + "bibtex": "@inproceedings{xu-etal-2022-conreader,\n title = \"{C}on{R}eader: Exploring Implicit Relations in Contracts for Contract Clause Extraction\",\n author = \"Xu, Weiwen and\n Deng, Yang and\n Lei, Wenqiang and\n Zhao, Wenlong and\n Chua, Tat-Seng and\n Lam, Wai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.166/\",\n doi = \"10.18653/v1/2022.emnlp-main.166\",\n pages = \"2581--2594\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.166.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.166/", + "pdf_size": 1454858, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14380952307404393326&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 6, + "aff": "The Chinese University of Hong Kong; The Chinese University of Hong Kong; Sichuan University; The Chinese University of Hong Kong; National University of Singapore; The Chinese University of Hong Kong", + "aff_domain": "se.cuhk.edu.hk;se.cuhk.edu.hk;gmail.com;gmail.com;comp.nus.edu.sg;se.cuhk.edu.hk", + "email": "se.cuhk.edu.hk;se.cuhk.edu.hk;gmail.com;gmail.com;comp.nus.edu.sg;se.cuhk.edu.hk", + "github": "https://github.com/wwxu21/ConReader", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Sichuan University;National University of Singapore", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.scu.edu.cn;https://www.nus.edu.sg", + "aff_unique_abbr": "CUHK;SCU;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.308", + "title": "Concadia: Towards Image-Based Text Generation with a Purpose", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current deep learning models often achieve excellent results on benchmark image-to-text datasets but fail to generate texts that are useful in practice. We argue that to close this gap, it is vital to distinguish descriptions from captions based on their distinct communicative roles. Descriptions focus on visual features and are meant to replace an image (often to increase accessibility), whereas captions appear alongside an image to supply additional information. To motivate this distinction and help people put it into practice, we introduce the publicly available Wikipedia-based dataset Concadia consisting of 96,918 images with corresponding English-language descriptions, captions, and surrounding context. 
Using insights from Concadia, models trained on it, and a preregistered human-subjects experiment with human- and model-generated texts, we characterize the commonalities and differences between descriptions and captions. In addition, we show that, for generating both descriptions and captions, it is useful to augment image-to-text models with representations of the textual context in which the image appeared.", + "author": "Elisa Kreiss; Fei Fang; Noah Goodman; Christopher Potts", + "authorids": "/e/elisa-kreiss/; /f/fei-fang/; /n/noah-goodman/; /c/christopher-potts/", + "bibtex": "@inproceedings{kreiss-etal-2022-concadia,\n title = \"Concadia: Towards Image-Based Text Generation with a Purpose\",\n author = \"Kreiss, Elisa and\n Fang, Fei and\n Goodman, Noah and\n Potts, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.308/\",\n doi = \"10.18653/v1/2022.emnlp-main.308\",\n pages = \"4667--4684\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.308.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.308/", + "pdf_size": 7189777, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10976534871659696578&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Linguistics; Department of Computer Science; Department of Computer Science + Department of Psychology; Department of Linguistics", + "aff_domain": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "email": "stanford.edu;stanford.edu;stanford.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1+0;0", + "aff_unique_norm": "University Affiliation Not 
Specified;Unknown Institution", + "aff_unique_dep": "Department of Linguistics;Department of Computer Science", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.findings-emnlp.199", + "title": "Conditional Supervised Contrastive Learning for Fair Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Contrastive representation learning has gained much attention due to its superior performance in learning representations from both image and sequential data. However, the learned representations could potentially lead to performance disparities in downstream tasks, such as increased silencing of underrepresented groups in toxicity comment classification. In light of this challenge, in this work, we study learning fair representations that satisfy a notion of fairness known as equalized odds for text classification via contrastive learning. Specifically, we first theoretically analyze the connections between learning representations with a fairness constraint and conditional supervised contrastive objectives, and then propose to use conditional supervised contrastive objectives to learn fair representations for text classification. We conduct experiments on two text datasets to demonstrate the effectiveness of our approaches in balancing the trade-offs between task performance and bias mitigation among existing baselines for text classification. 
Furthermore, we also show that the proposed methods are stable in different hyperparameter settings.", + "author": "Jianfeng Chi; William Shand; Yaodong Yu; Kai-Wei Chang; Han Zhao; Yuan Tian", + "authorids": "/j/jianfeng-chi/; /w/william-shand/; /y/yaodong-yu/; /k/kai-wei-chang/; /h/han-zhao/; /y/yuan-tian/", + "bibtex": "@inproceedings{chi-etal-2022-conditional,\n title = \"Conditional Supervised Contrastive Learning for Fair Text Classification\",\n author = \"Chi, Jianfeng and\n Shand, William and\n Yu, Yaodong and\n Chang, Kai-Wei and\n Zhao, Han and\n Tian, Yuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.199/\",\n doi = \"10.18653/v1/2022.findings-emnlp.199\",\n pages = \"2736--2756\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.199.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.199/", + "pdf_size": 4515695, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14562770247749856387&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Virginia; University of Virginia; UC Berkeley; UCLA; UIUC; UCLA", + "aff_domain": "virginia.edu;virginia.edu;eecs.berkeley.edu;cs.ucla.edu;illinois.edu;ucla.edu", + "email": "virginia.edu;virginia.edu;eecs.berkeley.edu;cs.ucla.edu;illinois.edu;ucla.edu", + "github": "https://github.com/JFChi/CSCL4FTC", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;3;2", + "aff_unique_norm": "University of Virginia;University of California, Berkeley;University of California, Los Angeles;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.virginia.edu;https://www.berkeley.edu;https://www.ucla.edu;https://www.illinois.edu", + "aff_unique_abbr": "UVA;UC Berkeley;UCLA;UIUC", + "aff_campus_unique_index": "1;2;3;2", + "aff_campus_unique": ";Berkeley;Los Angeles;Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.324", + "title": "Conditional set generation using Seq2seq models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conditional set generation learns a mapping from an input sequence of tokens to a set. Several NLP tasks, such as entity typing and dialogue emotion tagging, are instances of set generation. Seq2Seq models are a popular choice to model set generation but they treat a set as a sequence and do not fully leverage its key properties, namely order-invariance and cardinality. We propose a novel algorithm for effectively sampling informative orders over the combinatorial space of label orders. Further, we jointly model the set cardinality and output by listing the set size as the first element and taking advantage of the autoregressive factorization used by Seq2Seq models. Our method is a model-independent data augmentation approach that endows any Seq2Seq model with the signals of order-invariance and cardinality. Training a Seq2Seq model on this new augmented data (without any additional annotations), gets an average relative improvement of 20% for four benchmarks datasets across models spanning from BART-base, T5-11B, and GPT-3. 
We will release all code and data upon acceptance.", + "author": "Aman Madaan; Dheeraj Rajagopal; Niket Tandon; Yiming Yang; Antoine Bosselut", + "authorids": "/a/aman-madaan/; /d/dheeraj-rajagopal/; /n/niket-tandon/; /y/yiming-yang/; /a/antoine-bosselut/", + "bibtex": "@inproceedings{madaan-etal-2022-conditional,\n title = \"Conditional set generation using Seq2seq models\",\n author = \"Madaan, Aman and\n Rajagopal, Dheeraj and\n Tandon, Niket and\n Yang, Yiming and\n Bosselut, Antoine\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.324/\",\n doi = \"10.18653/v1/2022.emnlp-main.324\",\n pages = \"4874--4896\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.324.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.324/", + "pdf_size": 2014797, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14573663823344679815&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA; EPFL, Switzerland", + "aff_domain": "cs.cmu.edu; ; ; ; ", + "email": "cs.cmu.edu; ; ; ; ", + "github": "", + "project": "https://setgen.structgen.com", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Carnegie Mellon University;Allen Institute for Artificial Intelligence;\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": "Language Technologies Institute;;", + 
"aff_unique_url": "https://www.cmu.edu;https://allenai.org;https://www.epfl.ch", + "aff_unique_abbr": "CMU;AI2;EPFL", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Pittsburgh;Seattle;", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "2022.findings-emnlp.10", + "title": "Conditioned Masked Language and Image Modeling for Image-Text Dense Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Image-text retrieval is a fundamental cross-modal task that takes image/text as a query to retrieve relevant data of another type. The large-scale two-stream pre-trained models like CLIP have achieved tremendous success in this area. They embed the images and texts into instance representations with two separate encoders, aligning them on the instance-level with contrastive learning. Beyond this, the following works adopt the fine-grained token-level interaction (Masked Language and Image Modeling) to boost performance further. However, the vanilla token-level objectives are not designed to aggregate the image-text alignment information into the instance representations, but the token representations, causing a gap between pre-training and application. To address this issue, we carefully design two novel conditioned token-level pre-training objectives, Conditioned Masked Language and Image Modeling (ConMLM and ConMIM), forcing models to aggregate the token-level alignment information into the instance representations. Combing with the instance-level contrastive learning, we propose our cross-modal dense retrieval framework, Conditioned Language-Image Pre-training (ConLIP). 
Experimental results on two popular cross-modal retrieval benchmarks (MSCOCO and Flickr30k) reveal the effectiveness of our methods.", + "author": "Ziyang Luo; Yadong Xi; Rongsheng Zhang; GongZheng Li; Zeng Zhao; Jing Ma", + "authorids": "/z/ziyang-luo/; /y/yadong-xi/; /r/rongsheng-zhang/; /g/gongzheng-li/; /z/zeng-zhao/; /j/jing-ma/", + "bibtex": "@inproceedings{luo-etal-2022-conditioned,\n title = \"Conditioned Masked Language and Image Modeling for Image-Text Dense Retrieval\",\n author = \"Luo, Ziyang and\n Xi, Yadong and\n Zhang, Rongsheng and\n Li, GongZheng and\n Zhao, Zeng and\n Ma, Jing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.10/\",\n doi = \"10.18653/v1/2022.findings-emnlp.10\",\n pages = \"130--140\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.10.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.10/", + "pdf_size": 1042538, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3024212710633951717&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Hong Kong Baptist University, Hong Kong SAR, China+1; Fuxi AI Lab, NetEase Inc., Hangzhou, China+2; Fuxi AI Lab, NetEase Inc., Hangzhou, China+2; Fuxi AI Lab, NetEase Inc., Hangzhou, China+2; Fuxi AI Lab, NetEase Inc., Hangzhou, China+2; Department of Computer Science, Hong Kong Baptist University, Hong Kong SAR, China+1", + "aff_domain": "comp.hkbu.edu.hk;corp.netease.com;corp.netease.com; ; ;hkbu.edu.hk", + "email": "comp.hkbu.edu.hk;corp.netease.com;corp.netease.com; ; ;hkbu.edu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;2;2;2;2;0", + 
"aff_unique_norm": "Hong Kong Baptist University;;NetEase Inc.", + "aff_unique_dep": "Department of Computer Science;;Fuxi AI Lab", + "aff_unique_url": "https://www.hkbu.edu.hk;;https://www.163.com", + "aff_unique_abbr": "HKBU;;NetEase", + "aff_campus_unique_index": "0;2;2;2;2;0", + "aff_campus_unique": "Hong Kong;;Hangzhou", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.emnlp-main.196", + "title": "Conformal Predictor for Improving Zero-Shot Text Classification Efficiency", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (PLMs) have been shown effective for zero-shot (0shot) text classification. 0shot models based on natural language inference (NLI) and next sentence prediction (NSP) employ cross-encoder architecture and infer by making a forward pass through the model for each label-text pair separately. This increases the computational cost to make inferences linearly in the number of labels. In this work, we improve the efficiency of such cross-encoder-based 0shot models by restricting the number of likely labels using another fast base classifier-based conformal predictor (CP) calibrated on samples labeled by the 0shot model. Since a CP generates prediction sets with coverage guarantees, it reduces the number of target labels without excluding the most probable label based on the 0shot model. We experiment with three intent and two topic classification datasets. 
With a suitable CP for each dataset, we reduce the average inference time for NLI- and NSP-based models by 25.6% and 22.2% respectively, without dropping performance below the predefined error rate of 1%.", + "author": "Prafulla Kumar Choubey; Yu Bai; Chien-Sheng Wu; Wenhao Liu; Nazneen Rajani", + "authorids": "/p/prafulla-kumar-choubey/; /y/yu-bai/; /c/chien-sheng-wu/; /w/wenhao-liu/; /n/nazneen-rajani/", + "bibtex": "@inproceedings{choubey-etal-2022-conformal,\n title = \"Conformal Predictor for Improving Zero-Shot Text Classification Efficiency\",\n author = \"Choubey, Prafulla Kumar and\n Bai, Yu and\n Wu, Chien-Sheng and\n Liu, Wenhao and\n Rajani, Nazneen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.196/\",\n doi = \"10.18653/v1/2022.emnlp-main.196\",\n pages = \"3027--3034\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.196.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.196/", + "pdf_size": 616510, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12398867714812257356&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Salesforce AI Research; Salesforce AI Research; Salesforce AI Research; Faire.com; Hugging Face", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com;faire.com;hf.co", + "email": "salesforce.com;salesforce.com;salesforce.com;faire.com;hf.co", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "Salesforce;Faire;Hugging Face", + "aff_unique_dep": "Salesforce AI Research;;", + "aff_unique_url": "https://www.salesforce.com;https://www.faire.com;https://huggingface.co", + 
"aff_unique_abbr": "Salesforce AI;;Hugging Face", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.493", + "title": "Consecutive Question Generation via Dynamic Multitask Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we propose the task of consecutive question generation (CQG), which generates a set of logically related question-answer pairs to understand a whole passage, with a comprehensive consideration of the aspects including accuracy, coverage, and informativeness.To achieve this, we first examine the four key elements of CQG, i.e., question, answer, rationale, and context history, and propose a novel dynamic multitask framework with one main task generating a question-answer pair, and four auxiliary tasks generating other elements. It directly helps the model generate good questions through both joint training and self-reranking. At the same time, to fully explore the worth-asking information in a given passage, we make use of the reranking losses to sample the rationales and search for the best question series globally.Finally, we measure our strategy by QA data augmentation and manual evaluation, as well as a novel application of generated question-answer pairs on DocNLI. 
We prove that our strategy can improve question generation significantly and benefit multiple related NLP tasks.", + "author": "Yunji Li; Sujian Li; Xing Shi", + "authorids": "/y/yunji-li/; /s/sujian-li/; /x/xing-shi/", + "bibtex": "@inproceedings{li-etal-2022-consecutive,\n title = \"Consecutive Question Generation via Dynamic Multitask Learning\",\n author = \"Li, Yunji and\n Li, Sujian and\n Shi, Xing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.493/\",\n doi = \"10.18653/v1/2022.findings-emnlp.493\",\n pages = \"6620--6635\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.493.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.493/", + "pdf_size": 375982, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4153440096548202240&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Lab of Computational Linguistics, Peking University, China+ByteDance Lark Search; MOE Key Lab of Computational Linguistics, Peking University, China; ByteDance Lark Search", + "aff_domain": "bytedance.com;pku.edu.cn;bytedance.com", + "email": "bytedance.com;pku.edu.cn;bytedance.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;1", + "aff_unique_norm": "Peking University;ByteDance", + "aff_unique_dep": "MOE Key Lab of Computational Linguistics;Lark Search", + "aff_unique_url": "http://www.pku.edu.cn;https://www.bytedance.com", + "aff_unique_abbr": "PKU;ByteDance", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.574", + "title": "ConsistTL: Modeling 
Consistency in Transfer Learning for Low-Resource Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transfer learning is a simple and powerful method that can be used to boost model performance of low-resource neural machine translation (NMT). Existing transfer learning methods for NMT are static, which simply transfer knowledge from a parent model to a child model once via parameter initialization. In this paper, we propose a novel transfer learning method for NMT, namely ConsistTL, which can continuously transfer knowledge from the parent model during the training of the child model. Specifically, for each training instance of the child model, ConsistTL constructs the semantically-equivalent instance for the parent model and encourages prediction consistency between the parent and child for this instance, which is equivalent to the child model learning each instance under the guidance of the parent model. Experimental results on five low-resource NMT tasks demonstrate that ConsistTL results in significant improvements over strong transfer learning baselines, with a gain up to 1.7 BLEU over the existing back-translation model on the widely-used WMT17 Turkish-English benchmark. Further analysis reveals that ConsistTL can improve the inference calibration of the child model. Code and scripts are freely available at https://github.com/NLP2CT/ConsistTL.", + "author": "Zhaocong Li; Xuebo Liu; Derek F. Wong; Lidia S. Chao; Min Zhang", + "authorids": "/z/zhaocong-li/; /x/xuebo-liu/; /d/derek-f-wong/; /l/lidia-s-chao/; /m/min-zhang/", + "bibtex": "@inproceedings{li-etal-2022-consisttl,\n title = \"{C}onsist{TL}: Modeling Consistency in Transfer Learning for Low-Resource Neural Machine Translation\",\n author = \"Li, Zhaocong and\n Liu, Xuebo and\n Wong, Derek F. and\n Chao, Lidia S. 
and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.574/\",\n doi = \"10.18653/v1/2022.emnlp-main.574\",\n pages = \"8383--8394\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.574.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.574/", + "pdf_size": 1001912, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1158706826095876904&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "https://github.com/NLP2CT/ConsistTL", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.270", + "title": "Constructing Highly Inductive Contexts for Dialogue Safety through Controllable Reverse Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large pretrained language models can easily produce toxic or biased content, which is prohibitive for practical use. In order to detect such toxic generations, existing methods rely on templates, real-world data extraction, crowdsourcing workers or automatic generation to construct adversarial contexts that are likely to induce toxic generations. However, what type of context is more likely to induce unsafe responses is still under-explored. In this paper, we identify that context toxicity and context category (e.g., profanity, insult, drugs, etc.) are two important factors to cause safety issues in response generation. 
Hence, we propose a method called reverse generation to construct adversarial contexts conditioned on a given response, with the flexibility to control category, toxicity level and inductivity of the generated contexts. Via reverse generation, we augment the existing BAD dataset and construct a new dataset BAD+ which contains more than 120K diverse and highly inductive contexts in 12 categories. We test three popular pretrained dialogue models (Blender, DialoGPT and Plato2) and find that BAD+ can largely expose their safety problems. Furthermore, we show that BAD+ can greatly enhance the safety of generation, and we reveal the key factors of safety improvement. Our code and dataset is available at https://github.com/thu-coai/Reverse_Generation.", + "author": "Zhexin Zhang; Jiale Cheng; Hao Sun; Jiawen Deng; Fei Mi; Yasheng Wang; Lifeng Shang; Minlie Huang", + "authorids": "/z/zhexin-zhang/; /j/jiale-cheng/; /h/hao-sun/; /j/jiawen-deng/; /f/fei-mi/; /y/yasheng-wang/; /l/lifeng-shang/; /m/minlie-huang/", + "bibtex": "@inproceedings{zhang-etal-2022-constructing,\n title = \"Constructing Highly Inductive Contexts for Dialogue Safety through Controllable Reverse Generation\",\n author = \"Zhang, Zhexin and\n Cheng, Jiale and\n Sun, Hao and\n Deng, Jiawen and\n Mi, Fei and\n Wang, Yasheng and\n Shang, Lifeng and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.270/\",\n doi = \"10.18653/v1/2022.findings-emnlp.270\",\n pages = \"3684--3697\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.270.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.270/", + "pdf_size": 1671875, + "gs_citation": 11, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=15937752971975407541&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/thu-coai/Reverse_Generation", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-industry.10", + "title": "Consultation Checklists: Standardising the Human Evaluation of Medical Note Generation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Evaluating automatically generated text is generally hard due to the inherently subjective nature of many aspects of the output quality. This difficulty is compounded in automatic consultation note generation by differing opinions between medical experts both about which patient statements should be included in generated notes and about their respective importance in arriving at a diagnosis. Previous real-world evaluations of note-generation systems saw substantial disagreement between expert evaluators. In this paper we propose a protocol that aims to increase objectivity by grounding evaluations in Consultation Checklists, which are created in a preliminary step and then used as a common point of reference during quality assessment. 
We observed good levels of inter-annotator agreement in a first evaluation study using the protocol; further, using Consultation Checklists produced in the study as reference for automatic metrics such as ROUGE or BERTScore improves their correlation with human judgements compared to using the original human note.", + "author": "Aleksandar Savkov; Francesco Moramarco; Alex Papadopoulos Korfiatis; Mark Perera; Anya Belz; Ehud Reiter", + "authorids": "/a/aleksandar-savkov/; /f/francesco-moramarco/; /a/alex-papadopoulos-korfiatis/; /m/mark-perera/; /a/anja-belz/; /e/ehud-reiter/", + "bibtex": "@inproceedings{savkov-etal-2022-consultation,\n title = \"Consultation Checklists: Standardising the Human Evaluation of Medical Note Generation\",\n author = \"Savkov, Aleksandar and\n Moramarco, Francesco and\n Papadopoulos Korfiatis, Alex and\n Perera, Mark and\n Belz, Anya and\n Reiter, Ehud\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.10/\",\n doi = \"10.18653/v1/2022.emnlp-industry.10\",\n pages = \"111--120\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.10.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.10/", + "pdf_size": 1026589, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13899176121172585123&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.712", + "title": "Context Limitations Make Neural Language Models More Human-Like", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language models (LMs) have been used in 
cognitive modeling as well as engineering studies\u2014they compute information-theoretic complexity metrics that simulate humans\u2019 cognitive load during reading.This study highlights a limitation of modern neural LMs as the model of choice for this purpose: there is a discrepancy between their context access capacities and that of humans.Our results showed that constraining the LMs\u2019 context access improved their simulation of human reading behavior.We also showed that LM-human gaps in context access were associated with specific syntactic constructions; incorporating syntactic biases into LMs\u2019 context access might enhance their cognitive plausibility.", + "author": "Tatsuki Kuribayashi; Yohei Oseki; Ana Brassard; Kentaro Inui", + "authorids": "/t/tatsuki-kuribayashi/; /y/yohei-oseki/; /a/ana-brassard/; /k/kentaro-inui/", + "bibtex": "@inproceedings{kuribayashi-etal-2022-context,\n title = \"Context Limitations Make Neural Language Models More Human-Like\",\n author = \"Kuribayashi, Tatsuki and\n Oseki, Yohei and\n Brassard, Ana and\n Inui, Kentaro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.712/\",\n doi = \"10.18653/v1/2022.emnlp-main.712\",\n pages = \"10421--10436\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.712.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.712/", + "pdf_size": 879300, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17293749091084603142&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Tohoku University+Langsmith Inc.; University of Tokyo; Tohoku University+RIKEN; Tohoku University+RIKEN", + "aff_domain": 
"tohoku.ac.jp;g.ecc.u-tokyo.ac.jp;riken.jp;tohoku.ac.jp", + "email": "tohoku.ac.jp;g.ecc.u-tokyo.ac.jp;riken.jp;tohoku.ac.jp", + "github": "https://github.com/kuribayashi4/context_limitation_cognitive_modeling", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0+3;0+3", + "aff_unique_norm": "Tohoku University;Langsmith Inc.;University of Tokyo;RIKEN", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.tohoku.ac.jp;;https://www.u-tokyo.ac.jp;https://www.riken.jp", + "aff_unique_abbr": "Tohoku U;;UTokyo;RIKEN", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0+0;0+0", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-main.309", + "title": "Context Matters for Image Descriptions for Accessibility: Challenges for Referenceless Evaluation Metrics", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few images on the Web receive alt-text descriptions that would make them accessible to blind and low vision (BLV) users. Image-based NLG systems have progressed to the point where they can begin to address this persistent societal problem, but these systems will not be fully successful unless we evaluate them on metrics that guide their development correctly. Here, we argue against current referenceless metrics \u2013 those that don\u2019t rely on human-generated ground-truth descriptions \u2013 on the grounds that they do not align with the needs of BLV users. The fundamental shortcoming of these metrics is that they do not take context into account, whereas contextual information is highly valued by BLV users. To substantiate these claims, we present a study with BLV participants who rated descriptions along a variety of dimensions. An in-depth analysis reveals that the lack of context-awareness makes current referenceless metrics inadequate for advancing image accessibility. 
As a proof-of-concept, we provide a contextual version of the referenceless metric CLIPScore which begins to address the disconnect to the BLV data.", + "author": "Elisa Kreiss; Cynthia Bennett; Shayan Hooshmand; Eric Zelikman; Meredith Ringel Morris; Christopher Potts", + "authorids": "/e/elisa-kreiss/; /c/cynthia-bennett/; /s/shayan-hooshmand/; /e/eric-zelikman/; /m/meredith-ringel-morris/; /c/christopher-potts/", + "bibtex": "@inproceedings{kreiss-etal-2022-context,\n title = \"Context Matters for Image Descriptions for Accessibility: Challenges for Referenceless Evaluation Metrics\",\n author = \"Kreiss, Elisa and\n Bennett, Cynthia and\n Hooshmand, Shayan and\n Zelikman, Eric and\n Ringel Morris, Meredith and\n Potts, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.309/\",\n doi = \"10.18653/v1/2022.emnlp-main.309\",\n pages = \"4685--4697\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.309.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.309/", + "pdf_size": 3961978, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17487406532321598639&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Stanford University; Google Research; Columbia University; Stanford University; Google Research; Stanford University", + "aff_domain": "stanford.edu; ; ; ; ; ", + "email": "stanford.edu; ; ; ; ; ", + "github": "https://elisakreiss.github.io/contextual-description-evaluation/paper/reflessmetrics.html", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;1;0", + "aff_unique_norm": "Stanford University;Google;Columbia University", + "aff_unique_dep": ";Google 
Research;", + "aff_unique_url": "https://www.stanford.edu;https://research.google;https://www.columbia.edu", + "aff_unique_abbr": "Stanford;Google Research;Columbia", + "aff_campus_unique_index": "0;1;0;1;0", + "aff_campus_unique": "Stanford;Mountain View;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.306", + "title": "Context-Situated Pun Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Previous work on pun generation commonly begins with a given pun word (a pair of homophones for heterographic pun generation and a polyseme for homographic pun generation) and seeks to generate an appropriate pun. While this may enable efficient pun generation, we believe that a pun is most entertaining if it fits appropriately within a given context, e.g., a given situation or dialogue. In this work, we propose a new task, context-situated pun generation, where a specific context represented by a set of keywords is provided, and the task is to first identify suitable pun words that are appropriate for the context, then generate puns based on the context keywords and the identified pun words. We collect a new dataset, CUP (Context-sitUated Pun), containing 4.5k tuples of context words and pun pairs. Based on the new data and setup, we propose a pipeline system for context-situated pun generation, including a pun word retrieval module that identifies suitable pun words for a given context, and a pun generation module that generates puns from context keywords and pun words. Human evaluation shows that 69% of our top retrieved pun words can be used to generate context-situated puns, and our generation module yields successful puns 31% of the time given a plausible tuple of context words and pun pair, almost tripling the yield of a state-of-the-art pun generation model. 
With an end-to-end evaluation, our pipeline system with the top-1 retrieved pun pair for a given context can generate successful puns 40% of the time, better than all other modeling variations but 32% lower than the human success rate. This highlights the difficulty of the task, and encourages more research in this direction.", + "author": "Jiao Sun; Anjali Narayan-Chen; Shereen Oraby; Shuyang Gao; Tagyoung Chung; Jing Huang; Yang Liu; Nanyun Peng", + "authorids": "/j/jiao-sun/; /a/anjali-narayan-chen/; /s/shereen-oraby/; /s/shuyang-gao/; /t/tagyoung-chung/; /j/jing-huang/; /y/yang-liu/; /n/nanyun-peng/", + "bibtex": "@inproceedings{sun-etal-2022-context,\n title = \"Context-Situated Pun Generation\",\n author = \"Sun, Jiao and\n Narayan-Chen, Anjali and\n Oraby, Shereen and\n Gao, Shuyang and\n Chung, Tagyoung and\n Huang, Jing and\n Liu, Yang and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.306/\",\n doi = \"10.18653/v1/2022.emnlp-main.306\",\n pages = \"4635--4648\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.306.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.306/", + "pdf_size": 1667741, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17049620657591615745&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; University of Southern California + University of California, Los Angeles", + "aff_domain": "usc.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;cs.ucla.edu", + "email": 
"usc.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;1;1;1;1;0+2", + "aff_unique_norm": "University of Southern California;Amazon;University of California, Los Angeles", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.usc.edu;https://www.amazon.com;https://www.ucla.edu", + "aff_unique_abbr": "USC;Amazon;UCLA", + "aff_campus_unique_index": "0;0+0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.251", + "title": "Context-aware Information-theoretic Causal De-biasing for Interactive Sequence Labeling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Supervised training of existing deep learning models for sequence labeling relies on large scale labeled datasets. Such datasets are generally created with crowd-source labeling. However, crowd-source labeling for tasks of sequence labeling can be expensive and time-consuming. Further, crowd-source labeling by external annotators may not be appropriate for data that contains user private information. Considering the above limitations of crowd-source labeling, we study interactive sequence labeling that allows training directly with the user feedback, which alleviates the annotation cost and maintains the user privacy. We identify two bias, namely, context bias and feedback bias, by formulating interactive sequence labeling via a Structural Causal Model (SCM). To alleviate the context and feedback bias based on the SCM, we identify the frequent context tokens as confounders in the backdoor adjustment and further propose an entropy-based modulation that is inspired by information theory. entities more sample-efficiently. 
With extensive experiments, we validate that our approach can effectively alleviate the biases and our models can be efficiently learnt with the user feedback.", + "author": "Junda Wu; Rui Wang; Tong Yu; Ruiyi Zhang; Handong Zhao; Shuai Li; Ricardo Henao; Ani Nenkova", + "authorids": "/j/junda-wu/; /r/rui-wang/; /t/tong-yu/; /r/ruiyi-zhang/; /h/handong-zhao/; /s/shuai-li/; /r/ricardo-henao/; /a/ani-nenkova/", + "bibtex": "@inproceedings{wu-etal-2022-context,\n title = \"Context-aware Information-theoretic Causal De-biasing for Interactive Sequence Labeling\",\n author = \"Wu, Junda and\n Wang, Rui and\n Yu, Tong and\n Zhang, Ruiyi and\n Zhao, Handong and\n Li, Shuai and\n Henao, Ricardo and\n Nenkova, Ani\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.251/\",\n doi = \"10.18653/v1/2022.findings-emnlp.251\",\n pages = \"3436--3448\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.251.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.251/", + "pdf_size": 5733184, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15321168017927596611&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 2, + "aff": "New York University; Duke University; Adobe Research; Adobe Research; Adobe Research; Shanghai Jiao Tong University; Duke University; Adobe Research", + "aff_domain": "nyu.edu;duke.edu;adobe.com;adobe.com;adobe.com;sjtu.edu.cn;duke.edu;adobe.com", + "email": "nyu.edu;duke.edu;adobe.com;adobe.com;adobe.com;sjtu.edu.cn;duke.edu;adobe.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;2;2;3;1;2", + "aff_unique_norm": "New York University;Duke University;Adobe;Shanghai Jiao 
Tong University", + "aff_unique_dep": ";;Adobe Research;", + "aff_unique_url": "https://www.nyu.edu;https://www.duke.edu;https://research.adobe.com;https://www.sjtu.edu.cn", + "aff_unique_abbr": "NYU;Duke;Adobe;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.339", + "title": "Contextualizing Language Models for Norms Diverging from Social Majority", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To comprehensibly contextualize decisions, artificial systems in social situations need a high degree of awareness of the rules of conduct of human behavior. Especially transformer-based language models have recently been shown to exhibit some such awareness. But what if norms in some social setting do not adhere to or even blatantly deviate from the mainstream? In this paper, we introduce a novel mechanism based on deontic logic to allow for a flexible adaptation of individual norms by de-biasing training data sets and a task-reduction to textual entailment. 
Building on the popular \u2018Moral Stories\u2019 dataset we on the one hand highlight the intrinsic bias of current language models, on the other hand characterize the adaptability of pre-trained models to deviating norms in fine-tuning settings.", + "author": "Niklas Kiehne; Hermann Kroll; Wolf-Tilo Balke", + "authorids": "/n/niklas-kiehne/; /h/hermann-kroll/; /w/wolf-tilo-balke/", + "bibtex": "@inproceedings{kiehne-etal-2022-contextualizing,\n title = \"Contextualizing Language Models for Norms Diverging from Social Majority\",\n author = \"Kiehne, Niklas and\n Kroll, Hermann and\n Balke, Wolf-Tilo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.339/\",\n doi = \"10.18653/v1/2022.findings-emnlp.339\",\n pages = \"4620--4633\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.339.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.339/", + "pdf_size": 224741, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6283222844482401623&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 2, + "aff": "Institute for Information Systems TU Braunschweig; Institute for Information Systems TU Braunschweig; Institute for Information Systems TU Braunschweig", + "aff_domain": "ifis.cs.tu-bs.de;ifis.cs.tu-bs.de;ifis.cs.tu-bs.de", + "email": "ifis.cs.tu-bs.de;ifis.cs.tu-bs.de;ifis.cs.tu-bs.de", + "github": "https://github.com/nikrruun/contrastive_moral_stories", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technical University of Braunschweig", + "aff_unique_dep": "Institute for Information Systems", + "aff_unique_url": "https://www.tu-braunschweig.de", + "aff_unique_abbr": 
"TU Braunschweig", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Braunschweig", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.111", + "title": "Continual Learning of Neural Machine Translation within Low Forgetting Risk Regions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper considers continual learning of large-scale pretrained neural machine translation model without accessing the previous training data or introducing model separation. We argue that the widely used regularization-based methods, which perform multi-objective learning with an auxiliary loss, suffer from the misestimate problem and cannot always achieve a good balance between the previous and new tasks. To solve the problem, we propose a two-stage training method based on the local features of the real loss. We first search low forgetting risk regions, where the model can retain the performance on the previous task as the parameters are updated, to avoid the catastrophic forgetting problem. Then we can continually train the model within this region only with the new training data to fit the new task. Specifically, we propose two methods to search the low forgetting risk regions, which are based on the curvature of loss and the impacts of the parameters on the model output, respectively. 
We conduct experiments on domain adaptation and more challenging language adaptation tasks, and the experimental results show that our method can achieve significant improvements compared with several strong baselines.", + "author": "Shuhao Gu; Bojie Hu; Yang Feng", + "authorids": "/s/shuhao-gu/; /b/bojie-hu/; /y/yang-feng/", + "bibtex": "@inproceedings{gu-etal-2022-continual,\n title = \"Continual Learning of Neural Machine Translation within Low Forgetting Risk Regions\",\n author = \"Gu, Shuhao and\n Hu, Bojie and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.111/\",\n doi = \"10.18653/v1/2022.emnlp-main.111\",\n pages = \"1707--1718\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.111.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.111/", + "pdf_size": 2449157, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5069046174170616397&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences; Tencent Minority-Mandarin Translation, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences", + "aff_domain": "ict.ac.cn;tencent.com;ict.ac.cn", + "email": "ict.ac.cn;tencent.com;ict.ac.cn", + "github": "https://github.com/ictnlp/LFR-NMT", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese 
Academy of Sciences;Tencent", + "aff_unique_dep": "Institute of Computing Technology;;Minority-Mandarin Translation", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.tencent.com", + "aff_unique_abbr": "CAS;UCAS;Tencent", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.695", + "title": "Continual Training of Language Models for Few-Shot Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work on applying large language models (LMs) achieves impressive performance in many NLP applications. Adapting or posttraining an LM using an unlabeled domain corpus can produce even better performance for end-tasks in the domain. This paper proposes the problem of continually extending an LM by incrementally post-train the LM with a sequence of unlabeled domain corpora to expand its knowledge without forgetting its previous skills. The goal is to improve the few-shot end-task learning in these domains. The resulting system is called CPT (Continual PostTraining), which to our knowledge, is the first continual post-training system. 
Experimental results verify its effectiveness.", + "author": "Zixuan Ke; Haowei Lin; Yijia Shao; Hu Xu; Lei Shu; Bing Liu", + "authorids": "/z/zixuan-ke/; /h/haowei-lin/; /y/yijia-shao/; /h/hu-xu/; /l/lei-shu/; /b/bing-liu/", + "bibtex": "@inproceedings{ke-etal-2022-continual,\n title = \"Continual Training of Language Models for Few-Shot Learning\",\n author = \"Ke, Zixuan and\n Lin, Haowei and\n Shao, Yijia and\n Xu, Hu and\n Shu, Lei and\n Liu, Bing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.695/\",\n doi = \"10.18653/v1/2022.emnlp-main.695\",\n pages = \"10205--10216\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.695.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.695/", + "pdf_size": 454609, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2758751607969780581&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, University of Illinois at Chicago; Wangxuan Institute of Computer Technology, Peking University; Department of Computer Science, University of Illinois at Chicago; Department of Computer Science, University of Illinois at Chicago; Department of Computer Science, University of Illinois at Chicago; Department of Computer Science, University of Illinois at Chicago", + "aff_domain": "uic.edu;pku.edu.cn;pku.edu.cn;uic.edu;google.com;uic.edu", + "email": "uic.edu;pku.edu.cn;pku.edu.cn;uic.edu;google.com;uic.edu", + "github": "https://github.com/UIC-Liu-Lab/CPT", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "University of Illinois at Chicago;Peking University", + "aff_unique_dep": 
"Department of Computer Science;Wangxuan Institute of Computer Technology", + "aff_unique_url": "https://www.uic.edu;http://www.pku.edu.cn", + "aff_unique_abbr": "UIC;PKU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;1;0;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.385", + "title": "Continuation KD: Improved Knowledge Distillation through the Lens of Continuation Optimization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge Distillation (KD) has been extensively used for natural language understanding (NLU) tasks to improve a small model\u2019s (a student) generalization by transferring the knowledge from a larger model (a teacher). Although KD methods achieve state-of-the-art performance in numerous settings, they suffer from several problems limiting their performance. It is shown in the literature that the capacity gap between the teacher and the student networks can make KD ineffective. Additionally, existing KD techniques do not mitigate the noise in the teacher\u2019s output: modeling the noisy behaviour of the teacher can distract the student from learning more useful features. We propose a new KD method that addresses these problems and facilitates the training compared to previous techniques. Inspired by continuation optimization, we design a training procedure that optimizes the highly non-convex KD objective by starting with the smoothed version of this objective and making it more complex as the training proceeds. 
Our method (Continuation-KD) achieves state-of-the-art performance across various compact architectures on NLU (GLUE benchmark) and computer vision tasks (CIFAR-10 and CIFAR-100).", + "author": "Aref Jafari; Ivan Kobyzev; Mehdi Rezagholizadeh; Pascal Poupart; Ali Ghodsi", + "authorids": "/a/aref-jafari/; /i/ivan-kobyzev/; /m/mehdi-rezagholizadeh/; /p/pascal-poupart/; /a/ali-ghodsi/", + "bibtex": "@inproceedings{jafari-etal-2022-continuation,\n title = \"Continuation {KD}: Improved Knowledge Distillation through the Lens of Continuation Optimization\",\n author = \"Jafari, Aref and\n Kobyzev, Ivan and\n Rezagholizadeh, Mehdi and\n Poupart, Pascal and\n Ghodsi, Ali\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.385/\",\n doi = \"10.18653/v1/2022.findings-emnlp.385\",\n pages = \"5260--5269\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.385.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.385/", + "pdf_size": 336190, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7309613650278506393&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Waterloo; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; University of Waterloo; University of Waterloo", + "aff_domain": "uwaterloo.ca;huawei.com;huawei.com;uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;huawei.com;huawei.com;uwaterloo.ca;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;0", + "aff_unique_norm": "University of Waterloo;Huawei", + "aff_unique_dep": ";Noah\u2019s Ark Lab", + "aff_unique_url": "https://uwaterloo.ca;https://www.huawei.com", + "aff_unique_abbr": 
"UW;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;0", + "aff_country_unique": "Canada;China" + }, + { + "id": "2022.emnlp-main.300", + "title": "Continued Pretraining for Better Zero- and Few-Shot Promptability", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently introduced language model prompting methods can achieve high accuracy in zero- and few-shot settings while requiring few to no learned task-specific parameters. Nevertheless, these methods still often trail behind full model finetuning. In this work, we investigate if a dedicated continued pretraining stage could improve \u201cpromptability\u201d, i.e., zero-shot performance with natural language prompts or few-shot performance with prompt tuning. We reveal settings where existing continued pretraining methods lack promptability. We also identify current methodological gaps, which we fill with thorough large-scale experiments. We demonstrate that a simple recipe, continued pretraining that incorporates a trainable prompt during multi-task learning, leads to improved promptability in both zero- and few-shot settings compared to existing methods, up to 31% relative. On the other hand, we find that continued pretraining using MAML-style meta-learning, a method that directly optimizes few-shot promptability, yields subpar performance. 
We validate our findings with two prompt tuning methods, and, based on our results, we provide concrete recommendations to optimize promptability for different use cases.", + "author": "Zhaofeng Wu; Robert L Logan IV; Pete Walsh; Akshita Bhagia; Dirk Groeneveld; Sameer Singh; Iz Beltagy", + "authorids": "/z/zhaofeng-wu/; /r/robert-l-logan-iv/; /p/pete-walsh/; /a/akshita-bhagia/; /d/dirk-groeneveld/; /s/sameer-singh/; /i/iz-beltagy/", + "bibtex": "@inproceedings{wu-etal-2022-continued,\n title = \"Continued Pretraining for Better Zero- and Few-Shot Promptability\",\n author = \"Wu, Zhaofeng and\n Logan IV, Robert L and\n Walsh, Pete and\n Bhagia, Akshita and\n Groeneveld, Dirk and\n Singh, Sameer and\n Beltagy, Iz\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.300/\",\n doi = \"10.18653/v1/2022.emnlp-main.300\",\n pages = \"4517--4531\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.300.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.300/", + "pdf_size": 722809, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7718454480760044483&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Cancer/MIT/Aries; Aries; Libra; Libra; Libra; Libra/Aries; Libra", + "aff_domain": "csail.mit.edu;dataminr.com;allenai.org;allenai.org;allenai.org;uci.edu;allenai.org", + "email": "csail.mit.edu;dataminr.com;allenai.org;allenai.org;allenai.org;uci.edu;allenai.org", + "github": "https://github.com/allenai/better-promptability", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;2;2;2;2", + "aff_unique_norm": "Massachusetts Institute of Technology;Aries;Libra Association", + 
"aff_unique_dep": ";;", + "aff_unique_url": "https://web.mit.edu;;https://libra.org", + "aff_unique_abbr": "MIT;;Libra", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;2;2;2;2;2", + "aff_country_unique": "United States;;Switzerland" + }, + { + "id": "2022.findings-emnlp.56", + "title": "Contrastive Demonstration Tuning for Pre-trained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pretrained language models can be effectively stimulated by textual prompts or demonstrations, especially in low-data scenarios. Recent works have focused on automatically searching discrete or continuous prompts or optimized verbalizers, yet studies for the demonstration are still limited. Concretely, the demonstration examples are crucial for an excellent final performance of prompt-tuning. In this paper, we propose a novel pluggable, extensible, and efficient approach named contrastive demonstration tuning, which is free of demonstration sampling. Furthermore, the proposed approach can be: (i) Plugged into any previous prompt-tuning approaches; (ii) Extended to widespread classification tasks with a large number of categories. Experimental results on 16 datasets illustrate that our method integrated with previous approaches LM-BFF and P-tuning can yield better performance. 
Code is available in https://github.com/zjunlp/PromptKG/tree/main/research/Demo-Tuning.", + "author": "Xiaozhuan Liang; Ningyu Zhang; Siyuan Cheng; Zhenru Zhang; Chuanqi Tan; Huajun Chen", + "authorids": "/x/xiaozhuan-liang/; /n/ningyu-zhang/; /s/siyuan-cheng/; /z/zhenru-zhang/; /c/chuanqi-tan/; /h/huajun-chen/", + "bibtex": "@inproceedings{liang-etal-2022-contrastive,\n title = \"Contrastive Demonstration Tuning for Pre-trained Language Models\",\n author = \"Liang, Xiaozhuan and\n Zhang, Ningyu and\n Cheng, Siyuan and\n Zhang, Zhenru and\n Tan, Chuanqi and\n Chen, Huajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.56/\",\n doi = \"10.18653/v1/2022.findings-emnlp.56\",\n pages = \"799--811\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.56.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.56/", + "pdf_size": 489809, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3319843022556522975&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China; Alibaba Group, China; Alibaba Group, China; Zhejiang University & AZFT Joint Lab for Knowledge Engine, China+Hangzhou Innovation Center, Zhejiang University, China", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;zju.edu.cn", + "email": 
"zju.edu.cn;zju.edu.cn;zju.edu.cn;alibaba-inc.com;alibaba-inc.com;zju.edu.cn", + "github": "https://github.com/zjunlp/PromptKG/tree/main/research/Demo-Tuning", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;1;1;0+0", + "aff_unique_norm": "Zhejiang University;Alibaba Group", + "aff_unique_dep": "Joint Lab for Knowledge Engine;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ZJU;Alibaba", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.338", + "title": "Contrastive Learning enhanced Author-Style Headline Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Headline generation is a task of generating an appropriate headline for a given article, which can be further used for machine-aided writing or enhancing the click-through ratio. Current works only use the article itself in the generation, but have not taken the writing style of headlines into consideration. In this paper, we propose a novel Seq2Seq model called CLH3G (Contrastive Learning enhanced Historical Headlines based Headline Generation) which can use the historical headlines of the articles that the author wrote in the past to improve the headline generation of current articles. By taking historical headlines into account, we can integrate the stylistic features of the author into our model, and generate a headline not only appropriate for the article, but also consistent with the author\u2019s style. In order to efficiently learn the stylistic features of the author, we further introduce a contrastive learning based auxiliary task for the encoder of our model. Besides, we propose two methods to use the learned stylistic features to guide both the pointer and the decoder during the generation. 
Experimental results show that historical headlines of the same user can improve the headline generation significantly, and both the contrastive learning module and the two style features fusion methods can further boost the performance.", + "author": "Hui Liu; Weidong Guo; Yige Chen; Xiangyang Li", + "authorids": "/h/hui-liu/; /w/weidong-guo/; /y/yige-chen/; /x/xiangyang-li/", + "bibtex": "@inproceedings{liu-etal-2022-contrastive,\n title = \"Contrastive Learning enhanced Author-Style Headline Generation\",\n author = \"Liu, Hui and\n Guo, Weidong and\n Chen, Yige and\n Li, Xiangyang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.338/\",\n doi = \"10.18653/v1/2022.emnlp-main.338\",\n pages = \"5063--5072\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.338.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.338/", + "pdf_size": 468875, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5258801036407763751&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Platform and Content Group, Tencent; Platform and Content Group, Tencent + College of Computer Science and Artificial Intelligence, Wenzhou University; College of Computer Science and Artificial Intelligence, Wenzhou University; Platform and Content Group, Tencent", + "aff_domain": "tencent.com;tencent.com;wzu.edu.cn;tencent.com", + "email": "tencent.com;tencent.com;wzu.edu.cn;tencent.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;1;0", + "aff_unique_norm": "Tencent;Wenzhou University", + "aff_unique_dep": "Platform and Content Group;College of Computer Science and Artificial 
Intelligence", + "aff_unique_url": "https://www.tencent.com;https://www.wzu.edu.cn", + "aff_unique_abbr": "Tencent;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.586", + "title": "Contrastive Learning with Expectation-Maximization for Weakly Supervised Phrase Grounding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Weakly supervised phrase grounding aims to learn an alignment between phrases in a caption and objects in a corresponding image using only caption-image annotations, i.e., without phrase-object annotations. Previous methods typically use a caption-image contrastive loss to indirectly supervise the alignment between phrases and objects, which hinders the maximum use of the intrinsic structure of the multimodal data and leads to unsatisfactory performance. In this work, we directly use the phrase-object contrastive loss in the condition that no positive annotation is available in the first place. Specifically, we propose a novel contrastive learning framework based on the expectation-maximization algorithm that adaptively refines the target prediction. Experiments on two widely used benchmarks, Flickr30K Entities and RefCOCO+, demonstrate the effectiveness of our framework. We obtain 63.05% top-1 accuracy on Flickr30K Entities and 59.51%/43.46% on RefCOCO+ TestA/TestB, outperforming the previous methods by a large margin, even surpassing a previous SoTA that uses a pre-trained vision-language model. 
Furthermore, we deliver a theoretical analysis of the effectiveness of our method from the perspective of the maximum likelihood estimate with latent variables.", + "author": "Keqin Chen; Richong Zhang; Samuel Mensah; Yongyi Mao", + "authorids": "/k/keqin-chen/; /r/richong-zhang/; /s/samuel-mensah/; /y/yongyi-mao/", + "bibtex": "@inproceedings{chen-etal-2022-contrastive,\n title = \"Contrastive Learning with Expectation-Maximization for Weakly Supervised Phrase Grounding\",\n author = \"Chen, Keqin and\n Zhang, Richong and\n Mensah, Samuel and\n Mao, Yongyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.586/\",\n doi = \"10.18653/v1/2022.emnlp-main.586\",\n pages = \"8549--8559\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.586.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.586/", + "pdf_size": 1697067, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9764403694195837884&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; Department of Computer Science, University of Sheffield, UK; School of Electrical Engineering and Computer Science, University of Ottawa, Canada", + "aff_domain": "act.buaa.edu.cn;act.buaa.edu.cn;sheffield.ac.uk;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;sheffield.ac.uk;uottawa.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;2;3", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Sheffield;University of Ottawa", + "aff_unique_dep": 
"SKLSDE;;Department of Computer Science;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.sheffield.ac.uk;https://www.uottawa.ca", + "aff_unique_abbr": ";;Sheffield;U Ottawa", + "aff_campus_unique_index": "0;0;2", + "aff_campus_unique": "Beijing;;Ottawa", + "aff_country_unique_index": "0;0+0;1;2", + "aff_country_unique": "China;United Kingdom;Canada" + }, + { + "id": "2022.findings-emnlp.522", + "title": "Contrastive Learning with Prompt-derived Virtual Semantic Prototypes for Unsupervised Sentence Embedding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Contrastive learning has become a new paradigm for unsupervised sentence embeddings.Previous studies focus on instance-wise contrastive learning, attempting to construct positive pairs with textual data augmentation. In this paper, we propose a novel Contrastive learning method with Prompt-derived Virtual semantic Prototypes (ConPVP). Specifically, with the help of prompts, we construct virtual semantic prototypes to each instance, and derive negative prototypes by using the negative form of the prompts.Using a prototypical contrastive loss, we enforce the anchor sentence embedding to be close to its corresponding semantic prototypes, and far apart from the negative prototypes as well as the prototypes of other sentences.Extensive experimental results on semantic textual similarity, transfer, and clustering tasks demonstrate the effectiveness of our proposed model compared to strong baselines.Code is available at https://github.com/lemon0830/promptCSE.", + "author": "Jiali Zeng; Yongjing Yin; Yufan Jiang; Shuangzhi Wu; Yunbo Cao", + "authorids": "/j/jiali-zeng/; /y/yongjing-yin/; /y/yufan-jiang/; /s/shuangzhi-wu/; /y/yunbo-cao/", + "bibtex": "@inproceedings{zeng-etal-2022-contrastive,\n title = \"Contrastive Learning with Prompt-derived Virtual Semantic Prototypes for Unsupervised Sentence Embedding\",\n author = \"Zeng, 
Jiali and\n Yin, Yongjing and\n Jiang, Yufan and\n Wu, Shuangzhi and\n Cao, Yunbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.522/\",\n doi = \"10.18653/v1/2022.findings-emnlp.522\",\n pages = \"7042--7053\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.522.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.522/", + "pdf_size": 1798912, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11203760472941222955&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "Tencent Cloud Xiaowei, Beijing, China; Westlake University, Zhejiang, China + Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China", + "aff_domain": "tencent.com;westlake.edu.cn;tencent.com;tencent.com;tencent.com", + "email": "tencent.com;westlake.edu.cn;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/lemon0830/promptCSE", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+0;0;0;0", + "aff_unique_norm": "Tencent Cloud Xiaowei;Westlake University", + "aff_unique_dep": ";", + "aff_unique_url": "https://cloud.tencent.com;https://www.westlake.edu.cn", + "aff_unique_abbr": "Tencent Cloud;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.318", + "title": "Controllable Dialogue Simulation with In-context Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Building dialogue systems requires a large corpus of annotated 
dialogues. Such datasets are usually created via crowdsourcing, which is expensive and time-consuming. In this paper, we propose Dialogic, a novel dialogue simulation method based on large language model in-context learning to automate dataset creation. Seeded with a few annotated dialogues, Dialogic automatically selects in-context examples for demonstration and prompts GPT-3 to generate new dialogues and annotations in a controllable way. Our method can rapidly expand a small set of dialogue data with minimum or zero human involvement and parameter update and is thus much more cost-efficient and time-saving than crowdsourcing. Experimental results on the MultiWOZ dataset demonstrate that training a model on the simulated dialogues leads to even better performance than using the same amount of human-generated dialogues under the challenging low-resource settings, with as few as 85 dialogues as a seed. When the full training set is given, our method can still serve as an effective data augmentation method to further improve performance. Human evaluation results also show that our simulated dialogues have near-human fluency and annotation accuracy. 
The code and data are available at https://github.com/Leezekun/dialogic.", + "author": "Zekun Li; Wenhu Chen; Shiyang Li; Hong Wang; Jing Qian; Xifeng Yan", + "authorids": "/z/zekun-li/; /w/wenhu-chen/; /s/shiyang-li/; /h/hong-wang/; /j/jing-qian/; /x/xifeng-yan/", + "bibtex": "@inproceedings{li-etal-2022-controllable,\n title = \"Controllable Dialogue Simulation with In-context Learning\",\n author = \"Li, Zekun and\n Chen, Wenhu and\n Li, Shiyang and\n Wang, Hong and\n Qian, Jing and\n Yan, Xifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.318/\",\n doi = \"10.18653/v1/2022.findings-emnlp.318\",\n pages = \"4330--4347\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.318.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.318/", + "pdf_size": 1184410, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2825087785448178294&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of California, Santa Barbara; University of Waterloo, Vector Institute; University of California, Santa Barbara; University of California, Santa Barbara; University of California, Santa Barbara; University of California, Santa Barbara", + "aff_domain": "cs.ucsb.edu;uwaterloo.ca;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;uwaterloo.ca;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "github": "https://github.com/Leezekun/dialogic", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "University of California, Santa Barbara;University of Waterloo", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.ucsb.edu;https://uwaterloo.ca", + "aff_unique_abbr": "UCSB;UW", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;1;0;0;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.findings-emnlp.98", + "title": "Controllable Factuality in Document-Grounded Dialog Systems Using a Noisy Channel Model", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this work, we present a model for document-grounded response generation in dialog that is decomposed into two components according to Bayes\u2019 theorem.One component is a traditional ungrounded response generation model and the other component models the reconstruction of the grounding document based on the dialog context and generated response.We propose different approximate decoding schemes and evaluate our approach on multiple open-domain and task-oriented document-grounded dialog datasets.Our experiments show that the model is more factual in terms of automatic factuality metrics than the baseline model.Furthermore, we outline how introducing scaling factors between the components allows for controlling the tradeoff between factuality and fluency in the model output.Finally, we compare our approach to a recently proposed method to control factuality in grounded dialog, CTRL (Rashkin et al., 2021), and show that both approaches can be combined to achieve additional improvements.", + "author": "Nico Daheim; David Thulke; Christian Dugast; Hermann Ney", + "authorids": "/n/nico-daheim/; /d/david-thulke/; /c/christian-dugast/; /h/hermann-ney/", + "bibtex": "@inproceedings{daheim-etal-2022-controllable,\n title = \"Controllable Factuality in Document-Grounded Dialog Systems Using a Noisy Channel Model\",\n author = \"Daheim, Nico and\n Thulke, David and\n Dugast, Christian and\n Ney, Hermann\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the 
Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.98/\",\n doi = \"10.18653/v1/2022.findings-emnlp.98\",\n pages = \"1365--1381\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.98.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.98/", + "pdf_size": 370248, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10059041685831228923&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Ubiquitous Knowledge Processing Lab, Department of Computer Science, Technical University of Darmstadt; Chair for Human Language Technology and Pattern Recognition, RWTH Aachen University + AppTek GmbH; AppTek GmbH; Chair for Human Language Technology and Pattern Recognition, RWTH Aachen University + AppTek GmbH", + "aff_domain": "ukp.tu-darmstadt.de;i6.informatik.rwth-aachen.de;appTek.com;i6.informatik.rwth-aachen.de", + "email": "ukp.tu-darmstadt.de;i6.informatik.rwth-aachen.de;appTek.com;i6.informatik.rwth-aachen.de", + "github": "https://github.com/ndaheim/noisy_channel_model", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;2;1+2", + "aff_unique_norm": "Technical University of Darmstadt;RWTH Aachen University;AppTek", + "aff_unique_dep": "Department of Computer Science;Chair for Human Language Technology and Pattern Recognition;", + "aff_unique_url": "https://www.tu-darmstadt.de;https://www.rwth-aachen.de;https://www.app-tek.com", + "aff_unique_abbr": "TUD;RWTH;AppTek", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Aachen", + "aff_country_unique_index": "0;0+0;0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.486", + "title": "Controllable Fake Document Infilling for Cyber Deception", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Recent works in cyber deception study how to deter malicious intrusion by generating multiple fake versions of a critical document to impose costs on adversaries who need to identify the correct information. However, existing approaches are context-agnostic, resulting in sub-optimal and unvaried outputs. We propose a novel context-aware model, Fake Document Infilling (FDI), by converting the problem to a controllable mask-then-infill procedure. FDI masks important concepts of varied lengths in the document, then infills a realistic but fake alternative considering both the previous and future contexts. We conduct comprehensive evaluations on technical documents and news stories. Results show that FDI outperforms the baselines in generating highly believable fakes with moderate modification to protect critical information and deceive adversaries.", + "author": "Yibo Hu; Yu Lin; Erick Skorupa Parolin; Latifur Khan; Kevin Hamlen", + "authorids": "/y/yibo-hu/; /y/yu-lin/; /e/erick-skorupa-parolin/; /l/latifur-khan/; /k/kevin-hamlen/", + "bibtex": "@inproceedings{hu-etal-2022-controllable,\n title = \"Controllable Fake Document Infilling for Cyber Deception\",\n author = \"Hu, Yibo and\n Lin, Yu and\n Skorupa Parolin, Erick and\n Khan, Latifur and\n Hamlen, Kevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.486/\",\n doi = \"10.18653/v1/2022.findings-emnlp.486\",\n pages = \"6505--6519\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.486.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.486/", + "pdf_size": 613827, + "gs_citation": 10, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=10325161531117746167&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "The University of Texas at Dallas; The University of Texas at Dallas; The University of Texas at Dallas; The University of Texas at Dallas; The University of Texas at Dallas", + "aff_domain": "utdallas.edu;utdallas.edu;utdallas.edu;utdallas.edu;utdallas.edu", + "email": "utdallas.edu;utdallas.edu;utdallas.edu;utdallas.edu;utdallas.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Texas at Dallas", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utdallas.edu", + "aff_unique_abbr": "UT Dallas", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Dallas", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.30", + "title": "Controlled Language Generation for Language Learning Items", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "This work aims to employ natural language generation (NLG) to rapidly generate items for English language learning applications: this requires both language models capable of generating fluent, high-quality English, and to control the output of the generation to match the requirements of the relevant items. We experiment with deep pretrained models for this task, developing novel methods for controlling items for factors relevant in language learning: diverse sentences for different proficiency levels and argument structure to test grammar. Human evaluation demonstrates high grammatically scores for all models (3.4 and above out of 4), and higher length (24%) and complexity (9%) over the baseline for the advanced proficiency model. 
Our results show that we can achieve strong performance while adding additional control to ensure diverse, tailored content for individual users.", + "author": "Kevin Stowe; Debanjan Ghosh; Mengxuan Zhao", + "authorids": "/k/kevin-stowe/; /d/debanjan-ghosh/; /m/mengxuan-zhao/", + "bibtex": "@inproceedings{stowe-etal-2022-controlled,\n title = \"Controlled Language Generation for Language Learning Items\",\n author = \"Stowe, Kevin and\n Ghosh, Debanjan and\n Zhao, Mengxuan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.30/\",\n doi = \"10.18653/v1/2022.emnlp-industry.30\",\n pages = \"294--305\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.30.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.30/", + "pdf_size": 507549, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8439720127272394774&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Educational Testing Service; Educational Testing Service; Educational Testing Service", + "aff_domain": "ets.org;ets.org;etscanada.ca", + "email": "ets.org;ets.org;etscanada.ca", + "github": "https://github.com/EducationalTestingService/concept-control-gen", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Educational Testing Service", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ets.org", + "aff_unique_abbr": "ETS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.385", + "title": "Controlled Text Reduction", + "track": "main", + "status": "Main", + "award": false, + 
"abstract": "Producing a reduced version of a source text, as in generic or focused summarization, inherently involves two distinct subtasks: deciding on targeted content and generating a coherent text conveying it. While some popular approaches address summarization as a single end-to-end task, prominent works support decomposed modeling for individual subtasks. Further, semi-automated text reduction is also very appealing, where users may identify targeted content while models would generate a corresponding coherent summary.In this paper, we focus on the second subtask, of generating coherent text given pre-selected content. Concretely, we formalize Controlled Text Reduction as a standalone task, whose input is a source text with marked spans of targeted content (\u201chighlighting\u201d).A model then needs to generate a coherent text that includes all and only the target information.We advocate the potential of such models, both for modular fully-automatic summarization, as well as for semi-automated human-in-the-loop use cases.Facilitating proper research, we crowdsource high-quality dev and test datasets for the task. 
Further, we automatically generate a larger \u201csilver\u201d training dataset from available summarization benchmarks, leveraging a pretrained summary-source alignment model.Finally, employing these datasets, we present a supervised baseline model, showing promising results and insightful analyses.", + "author": "Aviv Slobodkin; Paul Roit; Eran Hirsch; Ori Ernst; Ido Dagan", + "authorids": "/a/aviv-slobodkin/; /p/paul-roit/; /e/eran-hirsch/; /o/ori-ernst/; /i/ido-dagan/", + "bibtex": "@inproceedings{slobodkin-etal-2022-controlled,\n title = \"Controlled Text Reduction\",\n author = \"Slobodkin, Aviv and\n Roit, Paul and\n Hirsch, Eran and\n Ernst, Ori and\n Dagan, Ido\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.385/\",\n doi = \"10.18653/v1/2022.emnlp-main.385\",\n pages = \"5699--5715\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.385.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.385/", + "pdf_size": 1202877, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2463888831572653333&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff": "Bar-Ilan University; Bar-Ilan University; Bar-Ilan University; Bar-Ilan University; Bar-Ilan University", + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "github": "https://github.com/lovodkin93/Controlled_Text_Reduction", + "project": "https://huggingface.co/datasets/biu-nlp/Controlled-Text-Reduction-dataset", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Bar-Ilan University", + "aff_unique_dep": "", + 
"aff_unique_url": "https://www.biu.ac.il", + "aff_unique_abbr": "BIU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.findings-emnlp.431", + "title": "Controlling Bias Exposure for Fair Interpretable Predictions", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent work on reducing bias in NLP models usually focuses on protecting or isolating information related to a sensitive attribute (like gender or race). However, when sensitive information is semantically entangled with the task information of the input, e.g., gender information is predictive for a profession, a fair trade-off between task performance and bias mitigation is difficult to achieve. Existing approaches perform this trade-off by eliminating bias information from the latent space, lacking control over how much bias is necessarily required to be removed. We argue that a favorable debiasing method should use sensitive information \u2018fairly\u2019, rather than blindly eliminating it (Caliskan et al., 2017; Sun et al., 2019; Bogen et al., 2020). In this work, we provide a novel debiasing algorithm by adjustingthe predictive model\u2019s belief to (1) ignore the sensitive information if it is not useful for the task; (2) use sensitive information minimally as necessary for the prediction (while also incurring a penalty). 
Experimental results on two text classification tasks (influenced by gender) and an open-ended generation task (influenced by race) indicate that our model achieves a desirable trade-off between debiasing and task performance along with producing debiased rationales as evidence.", + "author": "Zexue He; Yu Wang; Julian McAuley; Bodhisattwa Prasad Majumder", + "authorids": "/z/zexue-he/; /y/yu-wang/; /j/julian-mcauley/; /b/bodhisattwa-prasad-majumder/", + "bibtex": "@inproceedings{he-etal-2022-controlling,\n title = \"Controlling Bias Exposure for Fair Interpretable Predictions\",\n author = \"He, Zexue and\n Wang, Yu and\n McAuley, Julian and\n Majumder, Bodhisattwa Prasad\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.431/\",\n doi = \"10.18653/v1/2022.findings-emnlp.431\",\n pages = \"5854--5866\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.431.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.431/", + "pdf_size": 489356, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10291969166812081395&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego; Department of Computer Science and Engineering, University of California, San Diego", + "aff_domain": "eng.ucsd.edu;ucsd.edu;eng.ucsd.edu;eng.ucsd.edu", + "email": "eng.ucsd.edu;ucsd.edu;eng.ucsd.edu;eng.ucsd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": 
"0;0;0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.421", + "title": "ConvFinQA: Exploring the Chain of Numerical Reasoning in Conversational Finance Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With the recent advance in large pre-trained language models, researchers have achieved record performances in NLP tasks that mostly focus on language pattern matching. The community is experiencing the shift of the challenge from how to model language to the imitation of complex reasoning abilities like human beings. In this work, we investigate the application domain of finance that involves real-world, complex numerical reasoning. We propose a new large-scale dataset, ConvFinQA, aiming to study the chain of numerical reasoning in conversational question answering. Our dataset poses great challenge in modeling long-range, complex numerical reasoning paths in real-world conversations. We conduct comprehensive experiments and analyses with both the neural symbolic methods and the prompting-based methods, to provide insights into the reasoning mechanisms of these two divisions. We believe our new dataset should serve as a valuable resource to push forward the exploration of real-world, complex reasoning tasks as the next research focus. 
Our dataset and code is publicly available at https://github.com/czyssrs/ConvFinQA.", + "author": "Zhiyu Chen; Shiyang Li; Charese Smiley; Zhiqiang Ma; Sameena Shah; William Yang Wang", + "authorids": "/z/zhiyu-chen/; /s/shiyang-li/; /c/charese-smiley/; /z/zhiqiang-ma/; /s/sameena-shah/; /w/william-yang-wang/", + "bibtex": "@inproceedings{chen-etal-2022-convfinqa,\n title = \"{C}onv{F}in{QA}: Exploring the Chain of Numerical Reasoning in Conversational Finance Question Answering\",\n author = \"Chen, Zhiyu and\n Li, Shiyang and\n Smiley, Charese and\n Ma, Zhiqiang and\n Shah, Sameena and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.421/\",\n doi = \"10.18653/v1/2022.emnlp-main.421\",\n pages = \"6279--6292\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.421.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.421/", + "pdf_size": 1117309, + "gs_citation": 105, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9469593417998790249&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of California, Santa Barbara; University of California, Santa Barbara; J.P. Morgan; J.P. Morgan; J.P. Morgan; University of California, Santa Barbara", + "aff_domain": "cs.ucsb.edu;cs.ucsb.edu;jpmchase.com;jpmchase.com;jpmchase.com;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.ucsb.edu;jpmchase.com;jpmchase.com;jpmchase.com;cs.ucsb.edu", + "github": "https://github.com/czyssrs/ConvFinQA", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;1;0", + "aff_unique_norm": "University of California, Santa Barbara;J.P. 
Morgan", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucsb.edu;https://www.jpmorganchase.com", + "aff_unique_abbr": "UCSB;JPM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.190", + "title": "ConvTrans: Transforming Web Search Sessions for Conversational Dense Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conversational search provides users with a natural and convenient new search experience. Recently, conversational dense retrieval has shown to be a promising technique for realizing conversational search. However, as conversational search systems have not been widely deployed, it is hard to get large-scale real conversational search sessions and relevance labels to support the training of conversational dense retrieval. To tackle this data scarcity problem, previous methods focus on developing better few-shot learning approaches or generating pseudo relevance labels, but the data they use for training still heavily rely on manual generation.In this paper, we present ConvTrans, a data augmentation method that can automatically transform easily-accessible web search sessions into conversational search sessions to fundamentally alleviate the data scarcity problem for conversational dense retrieval. ConvTrans eliminates the gaps between these two types of sessions in terms of session quality and query form to achieve effective session transformation. 
Extensive evaluations on two widely used conversational search benchmarks, i.e., CAsT-19 and CAsT-20, demonstrate that the same model trained on the data generated by ConvTrans can achieve comparable retrieval performance as it trained on high-quality but expensive artificial conversational search data.", + "author": "Kelong Mao; Zhicheng Dou; Hongjin Qian; Fengran Mo; Xiaohua Cheng; Zhao Cao", + "authorids": "/k/kelong-mao/; /z/zhicheng-dou/; /h/hongjin-qian/; /f/fengran-mo/; /x/xiaohua-cheng/; /z/zhao-cao/", + "bibtex": "@inproceedings{mao-etal-2022-convtrans,\n title = \"{C}onv{T}rans: Transforming Web Search Sessions for Conversational Dense Retrieval\",\n author = \"Mao, Kelong and\n Dou, Zhicheng and\n Qian, Hongjin and\n Mo, Fengran and\n Cheng, Xiaohua and\n Cao, Zhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.190/\",\n doi = \"10.18653/v1/2022.emnlp-main.190\",\n pages = \"2935--2946\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.190.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.190/", + "pdf_size": 593241, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9415448642628083116&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; Universit\u00e9 de Montr\u00e9al, Qu\u00e9bec, Canada; Huawei Poisson Lab; Huawei Poisson Lab", + "aff_domain": "ruc.edu.cn;ruc.edu.cn; ; ; ;", + "email": "ruc.edu.cn;ruc.edu.cn; ; ; ;", + "github": "", + "project": "", + 
"author_num": 6, + "aff_unique_index": "0;0;0;1;2;2", + "aff_unique_norm": "Renmin University of China;Universit\u00e9 de Montr\u00e9al;Huawei", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;;Poisson Lab", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.umontreal.ca;https://www.huawei.com", + "aff_unique_abbr": "RUC;UdeM;Huawei", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Beijing;Montr\u00e9al;", + "aff_country_unique_index": "0;0;0;1;0;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.217", + "title": "Conversation Disentanglement with Bi-Level Contrastive Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Conversation disentanglement aims to group utterances into detached sessions, which is a fundamental task in processing multi-party conversations. Existing methods have two main drawbacks. First, they overemphasize pairwise utterance relations but pay inadequate attention to the utterance-to-context relation modeling. Second, huge amount of human annotated data is required for training, which is expensive to obtain in practice. To address these issues, we propose a general disentangle model based on bi-level contrastive learning. It brings closer utterances in the same session while encourages each utterance to be near its clustered session prototypes in the representation space. Unlike existing approaches, our disentangle model works in both supervised setting with labeled data and unsupervised setting when no such data is available. 
The proposed method achieves new state-of-the-art performance on both settings across several public datasets.", + "author": "Chengyu Huang; Zheng Zhang; Hao Fei; Lizi Liao", + "authorids": "/c/chengyu-huang/; /z/zheng-zhang/; /h/hao-fei/; /l/lizi-liao/", + "bibtex": "@inproceedings{huang-etal-2022-conversation,\n title = \"Conversation Disentanglement with Bi-Level Contrastive Learning\",\n author = \"Huang, Chengyu and\n Zhang, Zheng and\n Fei, Hao and\n Liao, Lizi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.217/\",\n doi = \"10.18653/v1/2022.findings-emnlp.217\",\n pages = \"2985--2996\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.217.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.217/", + "pdf_size": 380812, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14549054146709970640&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 6, + "aff": "National University of Singapore; Tsinghua University; National University of Singapore; Singapore Management University", + "aff_domain": "u.nus.edu;gmail.com;nus.edu.sg;smu.edu.sg", + "email": "u.nus.edu;gmail.com;nus.edu.sg;smu.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "National University of Singapore;Tsinghua University;Singapore Management University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.tsinghua.edu.cn;https://www.smu.edu.sg", + "aff_unique_abbr": "NUS;THU;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": 
"2022.emnlp-main.668", + "title": "Coordinated Topic Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a new problem called coordinated topic modeling that imitates human behavior while describing a text corpus. It considers a set of well-defined topics like the axes of a semantic space with a reference representation. It then uses the axes to model a corpus for easily understandable representation. This new task helps represent a corpus more interpretably by reusing existing knowledge and benefits the corpora comparison task. We design ECTM, an embedding-based coordinated topic model that effectively uses the reference representation to capture the target corpus-specific aspects while maintaining each topic\u2019s global semantics. In ECTM, we introduce the topic- and document-level supervision with a self-training mechanism to solve the problem. Finally, extensive experiments on multiple domains show the superiority of our model over other baselines.", + "author": "Pritom Saha Akash; Jie Huang; Kevin Chen-Chuan Chang", + "authorids": "/p/pritom-saha-akash/; /j/jie-huang/; /k/kevin-chen-chuan-chang/", + "bibtex": "@inproceedings{akash-etal-2022-coordinated,\n title = \"Coordinated Topic Modeling\",\n author = \"Akash, Pritom Saha and\n Huang, Jie and\n Chang, Kevin Chen-Chuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.668/\",\n doi = \"10.18653/v1/2022.emnlp-main.668\",\n pages = \"9831--9843\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.668.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.668/", + "pdf_size": 11985892, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=16470650362591831475&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu", + "github": "https://github.com/pritomsaha/Coordinated-Topic-Modeling", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.56", + "title": "Correctable-DST: Mitigating Historical Context Mismatch between Training and Inference for Improved Dialogue State Tracking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently proposed dialogue state tracking (DST) approaches predict the dialogue state of a target turn sequentially based on the previous dialogue state. During the training time, the ground-truth previous dialogue state is utilized as the historical context. However, only the previously predicted dialogue state can be used in inference. This discrepancy might lead to error propagation, i.e., mistakes made by the model in the current turn are likely to be carried over to the following turns.To solve this problem, we propose Correctable Dialogue State Tracking (Correctable-DST). 
Specifically, it consists of three stages: (1) a Predictive State Simulator is exploited to generate a previously \u201cpredicted\u201d dialogue state based on the ground-truth previous dialogue state during training; (2) a Slot Detector is proposed to determine the slots with an incorrect value in the previously \u201cpredicted\u201d state and the slots whose values are to be updated in the current turn; (3) a State Generator takes the name of the above-selected slots as a prompt to generate the current state.Empirical results show that our approach achieves 67.51%, 68.24%, 70.30%, 71.38%, and 81.27% joint goal accuracy on MultiWOZ 2.0-2.4 datasets, respectively, and achieves a new state-of-the-art performance with significant improvements.", + "author": "Hongyan Xie; Haoxiang Su; Shuangyong Song; Hao Huang; Bo Zou; Kun Deng; Jianghua Lin; Zhihui Zhang; Xiaodong He", + "authorids": "/h/hongyan-xie/; /h/haoxiang-su/; /s/shuangyong-song/; /h/hao-huang/; /b/bo-zou/; /k/kun-deng/; /j/jianghua-lin/; /z/zhihui-zhang/; /x/xiaodong-he/", + "bibtex": "@inproceedings{xie-etal-2022-correctable,\n title = \"Correctable-{DST}: Mitigating Historical Context Mismatch between Training and Inference for Improved Dialogue State Tracking\",\n author = \"Xie, Hongyan and\n Su, Haoxiang and\n Song, Shuangyong and\n Huang, Hao and\n Zou, Bo and\n Deng, Kun and\n Lin, Jianghua and\n Zhang, Zhihui and\n He, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.56/\",\n doi = \"10.18653/v1/2022.emnlp-main.56\",\n pages = \"876--889\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.56.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.56/", + 
"pdf_size": 540494, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18095013374057793213&as_sdt=40000005&sciodt=0,22&hl=en", + "gs_version_total": 2, + "aff": "School of Information Science and Engineering, Xinjiang University, Urumqi, China; School of Information Science and Engineering, Xinjiang University, Urumqi, China; Department of Big Data and AI, China Telecom; School of Information Science and Engineering, Xinjiang University, Urumqi, China + Xinjiang Provincial Key Laboratory of Multi-lingual Information Technology, Urumqi, China; JD AI Research, Beijing, China; JD AI Research, Beijing, China; JD AI Research, Beijing, China; JD AI Research, Beijing, China; JD AI Research, Beijing, China", + "aff_domain": "gmail.com;gmail.com;chinatelecom.cn;gmail.com;jd.com;jd.com;jd.com;jd.com; ", + "email": "gmail.com;gmail.com;chinatelecom.cn;gmail.com;jd.com;jd.com;jd.com;jd.com; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0+2;3;3;3;3;3", + "aff_unique_norm": "Xinjiang University;China Telecom;Xinjiang Provincial Key Laboratory of Multi-lingual Information Technology;JD AI Research", + "aff_unique_dep": "School of Information Science and Engineering;Department of Big Data and AI;Provincial Key Laboratory;", + "aff_unique_url": ";https://www.chinatelecom.com.cn;;", + "aff_unique_abbr": ";CT;;", + "aff_campus_unique_index": "0;0;0+0;2;2;2;2;2", + "aff_campus_unique": "Urumqi;;Beijing", + "aff_country_unique_index": "0;0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.667", + "title": "Correcting Diverse Factual Errors in Abstractive Summarization via Post-Editing and Language Model Infilling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstractive summarization models often generate inconsistent summaries containing factual errors or hallucinated content. 
Recent works focus on correcting factual errors in generated summaries via post-editing. Such correction models are trained using adversarial non-factual summaries constructed using heuristic rules for injecting errors. However, generating non-factual summaries using heuristics often does not generalize well to actual model errors. In this work, we propose to generate hard, representative synthetic examples of non-factual summaries through infilling language models. With this data, we train a more robust fact-correction model to post-edit the summaries to improve factual consistency. Through quantitative and qualitative experiments on two popular summarization datasets\u2014 CNN/DM and XSum\u2014we show that our approach vastly outperforms prior methods in correcting erroneous summaries. Our model\u2014FactEdit\u2014improves factuality scores by over ~11 points on CNN/DM and over ~31 points on XSum on average across multiple summarization models, producing more factual summaries while maintaining competitive summarization quality.", + "author": "Vidhisha Balachandran; Hannaneh Hajishirzi; William Cohen; Yulia Tsvetkov", + "authorids": "/v/vidhisha-balachandran/; /h/hannaneh-hajishirzi/; /w/william-cohen/; /y/yulia-tsvetkov/", + "bibtex": "@inproceedings{balachandran-etal-2022-correcting,\n title = \"Correcting Diverse Factual Errors in Abstractive Summarization via Post-Editing and Language Model Infilling\",\n author = \"Balachandran, Vidhisha and\n Hajishirzi, Hannaneh and\n Cohen, William and\n Tsvetkov, Yulia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.667/\",\n doi = \"10.18653/v1/2022.emnlp-main.667\",\n pages = \"9818--9830\"\n}", 
+ "pdf": "https://aclanthology.org/2022.emnlp-main.667.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.667/", + "pdf_size": 499831, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2795503057437321520&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University; Allen Institute for Artificial Intelligence + Paul G. Allen School of Computer Science & Engineering, University of Washington; Google Research; Paul G. Allen School of Computer Science & Engineering, University of Washington", + "aff_domain": "cs.cmu.edu;cs.washington.edu;google.com;cs.washington.edu", + "email": "cs.cmu.edu;cs.washington.edu;google.com;cs.washington.edu", + "github": "https://github.com/vidhishanair/FactEdit", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;2", + "aff_unique_norm": "Carnegie Mellon University;Allen Institute for Artificial Intelligence;University of Washington;Google", + "aff_unique_dep": "Language Technologies Institute;;Paul G. Allen School of Computer Science & Engineering;Google Research", + "aff_unique_url": "https://www.cmu.edu;https://allenai.org;https://www.washington.edu;https://research.google", + "aff_unique_abbr": "CMU;AI2;UW;Google Research", + "aff_campus_unique_index": "0;2;3;2", + "aff_campus_unique": "Pittsburgh;;Seattle;Mountain View", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.106", + "title": "Counterfactual Data Augmentation via Perspective Transition for Open-Domain Dialogues", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The construction of open-domain dialogue systems requires high-quality dialogue datasets. The dialogue data admits a wide variety of responses for a given dialogue history, especially responses with different semantics. 
However, collecting high-quality such a dataset in most scenarios is labor-intensive and time-consuming. In this paper, we propose a data augmentation method to automatically augment high-quality responses with different semantics by counterfactual inference. Specifically, given an observed dialogue, our counterfactual generation model first infers semantically different responses by replacing the observed reply perspective with substituted ones. Furthermore, our data selection method filters out detrimental augmented responses. Experimental results show that our data augmentation method can augment high-quality responses with different semantics for a given dialogue history, and can outperform competitive baselines on multiple downstream tasks.", + "author": "Jiao Ou; Jinchao Zhang; Yang Feng; Jie Zhou", + "authorids": "/j/jiao-ou/; /j/jinchao-zhang/; /y/yang-feng/; /j/jie-zhou/", + "bibtex": "@inproceedings{ou-etal-2022-counterfactual,\n title = \"Counterfactual Data Augmentation via Perspective Transition for Open-Domain Dialogues\",\n author = \"Ou, Jiao and\n Zhang, Jinchao and\n Feng, Yang and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.106/\",\n doi = \"10.18653/v1/2022.emnlp-main.106\",\n pages = \"1635--1648\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.106.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.106/", + "pdf_size": 623994, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16716031455309709918&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese 
Academy of Sciences (ICT/CAS)+University of Chinese Academy of Sciences; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS)+University of Chinese Academy of Sciences; Pattern Recognition Center, WeChat AI, Tencent Inc, China", + "aff_domain": "ict.ac.cn;tencent.com;ict.ac.cn;tencent.com", + "email": "ict.ac.cn;tencent.com;ict.ac.cn;tencent.com", + "github": "https://github.com/ictnlp/CAPT", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0+1;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tencent Inc", + "aff_unique_dep": "Institute of Computing Technology;;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.tencent.com", + "aff_unique_abbr": "CAS;UCAS;Tencent", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.497", + "title": "Counterfactual Recipe Generation: Exploring Compositional Generalization in a Realistic Scenario", + "track": "main", + "status": "Main", + "award": false, + "abstract": "People can acquire knowledge in an unsupervised manner by reading, and compose the knowledge to make novel combinations. In this paper, we investigate whether pretrained language models can perform compositional generalization in a realistic setting: recipe generation. We design the counterfactual recipe generation task, which asks models to modify a base recipe according to the change of an ingredient. This task requires compositional generalization at two levels: the surface level of incorporating the new ingredient into the base recipe, and the deeper level of adjusting actions related to the changing ingredient. 
We collect a large-scale recipe dataset in Chinese for models to learn culinary knowledge, and a subset of action-level fine-grained annotations for evaluation.We finetune pretrained language models on the recipe corpus, and use unsupervised counterfactual generation methods to generate modified recipes.Results show that existing models have difficulties in modifying the ingredients while preserving the original text style, and often miss actions that need to be adjusted. Although pretrained language models can generate fluent recipe texts, they fail to truly learn and use the culinary knowledge in a compositional way. Code and data are available at https://github.com/xxxiaol/counterfactual-recipe-generation.", + "author": "Xiao Liu; Yansong Feng; Jizhi Tang; Chengang Hu; Dongyan Zhao", + "authorids": "/x/xiao-liu/; /y/yansong-feng/; /j/jizhi-tang/; /c/chengang-hu/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{liu-etal-2022-counterfactual,\n title = \"Counterfactual Recipe Generation: Exploring Compositional Generalization in a Realistic Scenario\",\n author = \"Liu, Xiao and\n Feng, Yansong and\n Tang, Jizhi and\n Hu, Chengang and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.497/\",\n doi = \"10.18653/v1/2022.emnlp-main.497\",\n pages = \"7354--7370\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.497.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.497/", + "pdf_size": 7766378, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8389326058387384331&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Wangxuan Institute of Computer Technology, Peking 
University + The MOE Key Laboratory of Computational Linguistics, Peking University; Wangxuan Institute of Computer Technology, Peking University + The MOE Key Laboratory of Computational Linguistics, Peking University; Baidu Inc., Beijing, China; Wangxuan Institute of Computer Technology, Peking University; Beijing Institute for General Artificial Intelligence + State Key Laboratory of Media Convergence Production Technology and Systems", + "aff_domain": "pku.edu.cn;pku.edu.cn;baidu.com;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;baidu.com;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/xxxiaol/counterfactual-recipe-generation", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1;0;2+3", + "aff_unique_norm": "Peking University;Baidu Inc.;Beijing Institute for General Artificial Intelligence;State Key Laboratory of Media Convergence Production Technology and Systems", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.baidu.com;http://www.bigaiai.org/;", + "aff_unique_abbr": "PKU;Baidu;BIGAI;", + "aff_campus_unique_index": ";;1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.400", + "title": "CrisisLTLSum: A Benchmark for Local Crisis Event Timeline Extraction and Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Social media has increasingly played a key role in emergency response: first responders can use public posts to better react to ongoing crisis events and deploy the necessary resources where they are most needed. Timeline extraction and abstractive summarization are critical technical tasks to leverage large numbers of social media posts about events. Unfortunately, there are few datasets for benchmarking technical approaches for those tasks. 
This paper presents , the largest dataset of local crisis event timelines available to date. contains 1,000 crisis event timelines across four domains: wildfires, local fires, traffic, and storms. We built using a semi-automated cluster-then-refine approach to collect data from the public Twitter stream. Our initial experiments indicate a significant gap between the performance of strong baselines compared to the human performance on both tasks.Our dataset, code, and models are publicly available (https://github.com/CrisisLTLSum/CrisisTimelines).", + "author": "Hossein Rajaby Faghihi; Bashar Alhafni; Ke Zhang; Shihao Ran; Joel Tetreault; Alejandro Jaimes", + "authorids": "/h/hossein-rajaby-faghihi/; /b/bashar-alhafni/; /k/ke-zhang/; /s/shihao-ran/; /j/joel-tetreault/; /a/alejandro-jaimes/", + "bibtex": "@inproceedings{rajaby-faghihi-etal-2022-crisisltlsum,\n title = \"{C}risis{LTLS}um: A Benchmark for Local Crisis Event Timeline Extraction and Summarization\",\n author = \"Rajaby Faghihi, Hossein and\n Alhafni, Bashar and\n Zhang, Ke and\n Ran, Shihao and\n Tetreault, Joel and\n Jaimes, Alejandro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.400/\",\n doi = \"10.18653/v1/2022.findings-emnlp.400\",\n pages = \"5455--5477\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.400.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.400/", + "pdf_size": 2440944, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17473134338983845201&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": 
"https://github.com/CrisisLTLSum/CrisisTimelines", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.244", + "title": "Cross-Align: Modeling Deep Cross-lingual Interactions for Word Alignment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Word alignment which aims to extract lexicon translation equivalents between source and target sentences, serves as a fundamental tool for natural language processing. Recent studies in this area have yielded substantial improvements by generating alignments from contextualized embeddings of the pre-trained multilingual language models. However, we find that the existing approaches capture few interactions between the input sentence pairs, which degrades the word alignment quality severely, especially for the ambiguous words in the monolingual context. To remedy this problem, we propose Cross-Align to model deep interactions between the input sentence pairs, in which the source and target sentences are encoded separately with the shared self-attention modules in the shallow layers, while cross-lingual interactions are explicitly constructed by the cross-attention modules in the upper layers. Besides, to train our model effectively, we propose a two-stage training framework, where the model is trained with a simple Translation Language Modeling (TLM) objective in the first stage and then finetuned with a self-supervised alignment objective in the second stage. 
Experiments show that the proposed Cross-Align achieves the state-of-the-art (SOTA) performance on four out of five language pairs.", + "author": "Siyu Lai; Zhen Yang; Fandong Meng; Yufeng Chen; Jinan Xu; Jie Zhou", + "authorids": "/s/siyu-lai/; /z/zhen-yang/; /f/fandong-meng/; /y/yufeng-chen/; /j/jinan-xu/; /j/jie-zhou/", + "bibtex": "@inproceedings{lai-etal-2022-cross,\n title = \"Cross-Align: Modeling Deep Cross-lingual Interactions for Word Alignment\",\n author = \"Lai, Siyu and\n Yang, Zhen and\n Meng, Fandong and\n Chen, Yufeng and\n Xu, Jinan and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.244/\",\n doi = \"10.18653/v1/2022.emnlp-main.244\",\n pages = \"3715--3725\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.244.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.244/", + "pdf_size": 2076449, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11615347534516289345&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China+Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China", + "aff_domain": "bjtu.edu.cn;tencent.com;tencent.com;bjtu.edu.cn;bjtu.edu.cn;tencent.com", + "email": 
"bjtu.edu.cn;tencent.com;tencent.com;bjtu.edu.cn;bjtu.edu.cn;tencent.com", + "github": "https://github.com/lisasiyu/Cross-Align", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;0;0;1", + "aff_unique_norm": "Beijing Jiaotong University;Tencent Inc", + "aff_unique_dep": "Beijing Key Lab of Traffic Data Analysis and Mining;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "http://www.bjtu.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "BJTU;Tencent", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.22", + "title": "Cross-Domain Sentiment Classification using Semantic Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Previous studies on cross-domain sentiment classification depend on the pivot features or utilize the target data for representation learning, which ignore the semantic relevance between different domains. To this end, we exploit Abstract Meaning Representation (AMR) to help with cross-domain sentiment classification. Compared with the textual input, AMR reduces data sparsity and explicitly provides core semantic knowledge and correlations between different domains. In particular, we develop an algorithm to construct a sentiment-driven semantic graph from sentence-level AMRs. We further design two strategies to linearize the semantic graph and propose a text-graph interaction model to fuse the text and semantic graph representations for cross-domain sentiment classification. Empirical studies show the effectiveness of our proposed model over several strong baselines. 
The results also indicate the importance of the proposed sentiment-driven semantic graph for cross-domain sentiment classification.", + "author": "Shichen Li; Zhongqing Wang; Xiaotong Jiang; Guodong Zhou", + "authorids": "/s/shichen-li/; /z/zhongqing-wang/; /x/xiaotong-jiang/; /g/guodong-zhou/", + "bibtex": "@inproceedings{li-etal-2022-cross-domain,\n title = \"Cross-Domain Sentiment Classification using Semantic Representation\",\n author = \"Li, Shichen and\n Wang, Zhongqing and\n Jiang, Xiaotong and\n Zhou, Guodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.22/\",\n doi = \"10.18653/v1/2022.findings-emnlp.22\",\n pages = \"289--299\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.22.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.22/", + "pdf_size": 608499, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=728002809204037780&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China", + "aff_domain": "outlook.com;suda.edu.cn;outlook.com;suda.edu.cn", + "email": "outlook.com;suda.edu.cn;outlook.com;suda.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Soochow University", + "aff_unique_dep": "Natural Language Processing Lab", + "aff_unique_url": "http://www.soochow.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + 
"aff_campus_unique": "Suzhou", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.16", + "title": "Cross-Encoder Data Annotation for Bi-Encoder Based Product Matching", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Matching a seller listed item to an appropriate product is an important step for an e-commerce platform. With the recent advancement in deep learning, there are different encoder based approaches being proposed as solution. When textual data for two products are available, cross-encoder approaches encode them jointly while bi-encoder approaches encode them separately. Since cross-encoders are computationally heavy, approaches based on bi-encoders are a common practice for this challenge. In this paper, we propose cross-encoder data annotation; a technique to annotate or refine human annotated training data for bi-encoder models using a cross-encoder model. This technique enables us to build a robust model without annotation on newly collected training data or further improve model performance on annotated training data. We evaluate the cross-encoder data annotation on the product matching task using a real-world e-commerce dataset containing 104 million products. 
Experimental results show that the cross-encoder data annotation improves 4% absolute accuracy when no annotation for training data is available, and 2% absolute accuracy when annotation for training data is available.", + "author": "Justin Chiu; Keiji Shinzato", + "authorids": "/j/justin-chiu/; /k/keiji-shinzato/", + "bibtex": "@inproceedings{chiu-shinzato-2022-cross,\n title = \"Cross-Encoder Data Annotation for Bi-Encoder Based Product Matching\",\n author = \"Chiu, Justin and\n Shinzato, Keiji\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.16/\",\n doi = \"10.18653/v1/2022.emnlp-industry.16\",\n pages = \"161--168\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.16.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.16/", + "pdf_size": 386892, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11943657230745605645&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Rakuten Institute of Technology; Rakuten Group Inc.", + "aff_domain": "rakuten.com;rakuten.com", + "email": "rakuten.com;rakuten.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Rakuten Institute of Technology;Rakuten Group", + "aff_unique_dep": ";", + "aff_unique_url": "https://rit.rakuten.com;https://www.rakuten.com", + "aff_unique_abbr": "RIT;Rakuten", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.552", + "title": "Cross-Linguistic Syntactic Difference in Multilingual BERT: How Good is It and How Does It Affect Transfer?", + "track": "main", + 
"status": "Main", + "award": false, + "abstract": "Multilingual BERT (mBERT) has demonstrated considerable cross-lingual syntactic ability, whereby it enables effective zero-shot cross-lingual transfer of syntactic knowledge. The transfer is more successful between some languages, but it is not well understood what leads to this variation and whether it fairly reflects difference between languages. In this work, we investigate the distributions of grammatical relations induced from mBERT in the context of 24 typologically different languages. We demonstrate that the distance between the distributions of different languages is highly consistent with the syntactic difference in terms of linguistic formalisms. Such difference learnt via self-supervision plays a crucial role in the zero-shot transfer performance and can be predicted by variation in morphosyntactic properties between languages. These results suggest that mBERT properly encodes languages in a way consistent with linguistic diversity and provide insights into the mechanism of cross-lingual transfer.", + "author": "Ningyu Xu; Tao Gui; Ruotian Ma; Qi Zhang; Jingting Ye; Menghan Zhang; Xuanjing Huang", + "authorids": "/n/ningyu-xu/; /t/tao-gui/; /r/ruotian-ma/; /q/qi-zhang/; /j/jingting-ye/; /m/menghan-zhang/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{xu-etal-2022-cross,\n title = \"Cross-Linguistic Syntactic Difference in Multilingual {BERT}: How Good is It and How Does It Affect Transfer?\",\n author = \"Xu, Ningyu and\n Gui, Tao and\n Ma, Ruotian and\n Zhang, Qi and\n Ye, Jingting and\n Zhang, Menghan and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.552/\",\n doi = \"10.18653/v1/2022.emnlp-main.552\",\n pages = \"8073--8092\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.552.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.552/", + "pdf_size": 2431651, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10770855520045748553&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "School of Computer Science, Fudan University + Institute of Modern Languages and Linguistics, Fudan University; Institute of Modern Languages and Linguistics, Fudan University; School of Computer Science, Fudan University; School of Computer Science, Fudan University; Department of Chinese Language and Literature, Fudan University + Department of Linguistic and Cultural Evolution, Max Planck Institute for Evolutionary Anthropology; Institute of Modern Languages and Linguistics, Fudan University; School of Computer Science, Fudan University + Institute of Modern Languages and Linguistics, Fudan University", + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;0;0;0;0+1;0;0+0", + "aff_unique_norm": "Fudan University;Max Planck Institute for Evolutionary Anthropology", + "aff_unique_dep": "School of Computer Science;Department of Linguistic and Cultural Evolution", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.eva.mpg.de", + "aff_unique_abbr": "Fudan;MPI-EVA", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+1;0;0+0", + "aff_country_unique": "China;Germany" + }, + { + "id": "2022.emnlp-main.516", + "title": "Cross-Modal Similarity-Based Curriculum Learning for Image Captioning", + "track": "main", + "status": "Main", + "award": 
false, + "abstract": "Image captioning models require the high-level generalization ability to describe the contents of various images in words. Most existing approaches treat the image\u2013caption pairs equally in their training without considering the differences in their learning difficulties. Several image captioning approaches introduce curriculum learning methods that present training data with increasing levels of difficulty. However, their difficulty measurements are either based on domain-specific features or prior model training. In this paper, we propose a simple yet efficient difficulty measurement for image captioning using cross-modal similarity calculated by a pretrained vision\u2013language model. Experiments on the COCO and Flickr30k datasets show that our proposed approach achieves superior performance and competitive convergence speed to baselines without requiring heuristics or incurring additional training costs. Moreover, the higher model performance on difficult examples and unseen data also demonstrates the generalization ability.", + "author": "Hongkuan Zhang; Saku Sugawara; Akiko Aizawa; Lei Zhou; Ryohei Sasano; Koichi Takeda", + "authorids": "/h/hongkuan-zhang/; /s/saku-sugawara/; /a/akiko-aizawa/; /l/lei-zhou/; /r/ryohei-sasano/; /k/koichi-takeda/", + "bibtex": "@inproceedings{zhang-etal-2022-cross,\n title = \"Cross-Modal Similarity-Based Curriculum Learning for Image Captioning\",\n author = \"Zhang, Hongkuan and\n Sugawara, Saku and\n Aizawa, Akiko and\n Zhou, Lei and\n Sasano, Ryohei and\n Takeda, Koichi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.516/\",\n doi = \"10.18653/v1/2022.emnlp-main.516\",\n 
pages = \"7599--7606\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.516.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.516/", + "pdf_size": 2674837, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3303572283003309923&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "Nagoya University; National Institute of Informatics; National Institute of Informatics; Nagoya University; Nagoya University; Nagoya University", + "aff_domain": "s.mail.nagoya-u.ac.jp;nii.ac.jp;nii.ac.jp;s.mail.nagoya-u.ac.jp;i.nagoya-u.ac.jp;i.nagoya-u.ac.jp", + "email": "s.mail.nagoya-u.ac.jp;nii.ac.jp;nii.ac.jp;s.mail.nagoya-u.ac.jp;i.nagoya-u.ac.jp;i.nagoya-u.ac.jp", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;0;0;0", + "aff_unique_norm": "Nagoya University;National Institute of Informatics", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nagoya-u.ac.jp;https://www.nii.ac.jp/", + "aff_unique_abbr": "Nagoya U;NII", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.58", + "title": "Cross-document Event Coreference Search: Task, Dataset and Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of Cross-document Coreference Resolution has been traditionally formulated as requiring to identify all coreference links across a given set of documents. We propose an appealing, and often more applicable, complementary set up for the task \u2013 Cross-document Coreference Search, focusing in this paper on event coreference. Concretely, given a mention in context of an event of interest, considered as a query, the task is to find all coreferring mentions for the query event in a large document collection. 
To support research on this task, we create a corresponding dataset, which is derived from Wikipedia while leveraging annotations in the available Wikipedia Event Coreferecene dataset (WEC-Eng). Observing that the coreference search setup is largely analogous to the setting of Open Domain Question Answering, we adapt the prominent Deep Passage Retrieval (DPR) model to our setting, as an appealing baseline. Finally, we present a novel model that integrates a powerful coreference scoring scheme into the DPR architecture, yielding improved performance.", + "author": "Alon Eirew; Avi Caciularu; Ido Dagan", + "authorids": "/a/alon-eirew/; /a/avi-caciularu/; /i/ido-dagan/", + "bibtex": "@inproceedings{eirew-etal-2022-cross,\n title = \"Cross-document Event Coreference Search: Task, Dataset and Modeling\",\n author = \"Eirew, Alon and\n Caciularu, Avi and\n Dagan, Ido\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.58/\",\n doi = \"10.18653/v1/2022.emnlp-main.58\",\n pages = \"900--913\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.58.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.58/", + "pdf_size": 646024, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8521326045013009987&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Bar Ilan University, Ramat-Gan, Israel+Intel Labs, Israel; Bar Ilan University, Ramat-Gan, Israel; Bar Ilan University, Ramat-Gan, Israel", + "aff_domain": "intel.com;gmail.com;cs.biu.ac.il", + "email": "intel.com;gmail.com;cs.biu.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Bar Ilan 
University;Intel Labs", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.biu.ac.il;https://www.intel.com/content/www/us/en/research/labs.html", + "aff_unique_abbr": "BIU;Intel", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Ramat-Gan;", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.749", + "title": "Cross-domain Generalization for AMR Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstract Meaning Representation (AMR) parsing aims to predict an AMR graph from textual input. Recently, there has been notable growth in AMR parsing performance. However, most existing work focuses on improving the performance in the specific domain, ignoring the potential domain dependence of AMR parsing systems. To address this, we extensively evaluate five representative AMR parsers on five domains and analyze challenges to cross-domain AMR parsing. We observe that challenges to cross-domain AMR parsing mainly arise from the distribution shift of words and AMR concepts. Based on our observation, we investigate two approaches to reduce the domain distribution divergence of text and AMR features, respectively. 
Experimental results on two out-of-domain test sets show the superiority of our method.", + "author": "Xuefeng Bai; Sen Yang; Leyang Cui; Linfeng Song; Yue Zhang", + "authorids": "/x/xuefeng-bai/; /s/sen-yang/; /l/leyang-cui/; /l/linfeng-song/; /y/yue-zhang/", + "bibtex": "@inproceedings{bai-etal-2022-cross,\n title = \"Cross-domain Generalization for {AMR} Parsing\",\n author = \"Bai, Xuefeng and\n Yang, Sen and\n Cui, Leyang and\n Song, Linfeng and\n Zhang, Yue\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.749/\",\n doi = \"10.18653/v1/2022.emnlp-main.749\",\n pages = \"10907--10921\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.749.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.749/", + "pdf_size": 513589, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15814558484216428783&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Engineering, Westlake University, China + Institute of Advanced Technology, Westlake Institute for Advanced Study, China; The Chinese University of Hong Kong, China; Tencent AI Lab, Shenzhen, China; Tencent AI Lab, Bellevue, WA, USA + Institute of Advanced Technology, Westlake Institute for Advanced Study, China; School of Engineering, Westlake University, China + Institute of Advanced Technology, Westlake Institute for Advanced Study, China", + "aff_domain": "westlake.edu.cn;cuhk.edu.hk;tencent.com;tencent.com;westlake.edu.cn", + "email": "westlake.edu.cn;cuhk.edu.hk;tencent.com;tencent.com;westlake.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;3;4+1;0+1", + "aff_unique_norm": "Westlake 
University;Westlake Institute for Advanced Study;The Chinese University of Hong Kong;Tencent AI Lab;Tencent", + "aff_unique_dep": "School of Engineering;Institute of Advanced Technology;;AI Lab;AI Lab", + "aff_unique_url": "https://www.westlake.edu.cn;http://www.wias.org.cn/;https://www.cuhk.edu.hk;https://ai.tencent.com;https://ai.tencent.com", + "aff_unique_abbr": ";WIAS;CUHK;Tencent AI Lab;Tencent AI Lab", + "aff_campus_unique_index": ";1;2;", + "aff_campus_unique": ";Shenzhen;Bellevue", + "aff_country_unique_index": "0+0;0;0;1+0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.388", + "title": "Cross-lingual Text-to-SQL Semantic Parsing with Representation Mixup", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We focus on the cross-lingual Text-to-SQL semantic parsing task,where the parsers are expected to generate SQL for non-English utterances based on English database schemas.Intuitively, English translation as side information is an effective way to bridge the language gap,but noise introduced by the translation system may affect parser effectiveness.In this work, we propose a Representation Mixup Framework (Rex) for effectively exploiting translations in the cross-lingual Text-to-SQL task.Particularly, it uses a general encoding layer, a transition layer, and a target-centric layer to properly guide the information flow of the English translation.Experimental results on CSpider and VSpider show that our framework can benefit from cross-lingual training and improve the effectiveness of semantic parsers, achieving state-of-the-art performance.", + "author": "Peng Shi; Linfeng Song; Lifeng Jin; Haitao Mi; He Bai; Jimmy Lin; Dong Yu", + "authorids": "/p/peng-shi/; /l/linfeng-song/; /l/lifeng-jin/; /h/haitao-mi/; /h/he-bai/; /j/jimmy-lin/; /d/dong-yu/", + "bibtex": "@inproceedings{shi-etal-2022-cross,\n title = \"Cross-lingual Text-to-{SQL} Semantic Parsing with Representation Mixup\",\n 
author = \"Shi, Peng and\n Song, Linfeng and\n Jin, Lifeng and\n Mi, Haitao and\n Bai, He and\n Lin, Jimmy and\n Yu, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.388/\",\n doi = \"10.18653/v1/2022.findings-emnlp.388\",\n pages = \"5296--5306\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.388.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.388/", + "pdf_size": 587413, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15092418641254180801&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "University of Waterloo\u2660; Tencent AI Lab, Bellevue, WA, USA\u2663; Tencent AI Lab, Bellevue, WA, USA\u2663; Tencent AI Lab, Bellevue, WA, USA\u2663; University of Waterloo\u2660; University of Waterloo\u2660; Tencent AI Lab, Bellevue, WA, USA\u2663", + "aff_domain": "uwaterloo.ca;tencent.com; ; ; ; ; ", + "email": "uwaterloo.ca;tencent.com; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;0;0;1", + "aff_unique_norm": "University of Waterloo;Tencent", + "aff_unique_dep": ";AI Lab", + "aff_unique_url": "https://uwaterloo.ca;https://ai.tencent.com", + "aff_unique_abbr": "UW;Tencent AI Lab", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Bellevue", + "aff_country_unique_index": "0;1;1;1;0;0;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-main.511", + "title": "Cross-lingual neural fuzzy matching for exploiting target-language monolingual corpora in computer-aided translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Computer-aided translation (CAT) tools based on 
translation memories (MT) play a prominent role in the translation workflow of professional translators. However, the reduced availability of in-domain TMs, as compared to in-domain monolingual corpora, limits its adoption for a number of translation tasks. In this paper, we introduce a novel neural approach aimed at overcoming this limitation by exploiting not only TMs, but also in-domain target-language (TL) monolingual corpora, and still enabling a similar functionality to that offered by conventional TM-based CAT tools. Our approach relies on cross-lingual sentence embeddings to retrieve translation proposals from TL monolingual corpora, and on a neural model to estimate their post-editing effort. The paper presents an automatic evaluation of these techniques on four language pairs that shows that our approach can successfully exploit monolingual texts in a TM-based CAT environment, increasing the amount of useful translation proposals, and that our neural model for estimating the post-editing effort enables the combination of translation proposals obtained from monolingual corpora and from TMs in the usual way. A human evaluation performed on a single language pair confirms the results of the automatic evaluation and seems to indicate that the translation proposals retrieved with our approach are more useful than what the automatic evaluation shows.", + "author": "Miquel Espl\u00e0-Gomis; V\u00edctor M. S\u00e1nchez-Cartagena; Juan Antonio P\u00e9rez-Ortiz; Felipe S\u00e1nchez-Mart\u00ednez", + "authorids": "/m/miquel-espla-gomis/; /v/victor-m-sanchez-cartagena/; /j/juan-antonio-perez-ortiz/; /f/felipe-sanchez-martinez/", + "bibtex": "@inproceedings{espla-gomis-etal-2022-cross,\n title = \"Cross-lingual neural fuzzy matching for exploiting target-language monolingual corpora in computer-aided translation\",\n author = \"Espl{\\`a}-Gomis, Miquel and\n S{\\'a}nchez-Cartagena, V{\\'i}ctor M. 
and\n P{\\'e}rez-Ortiz, Juan Antonio and\n S{\\'a}nchez-Mart{\\'i}nez, Felipe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.511/\",\n doi = \"10.18653/v1/2022.emnlp-main.511\",\n pages = \"7532--7543\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.511.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.511/", + "pdf_size": 208375, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18364134783455119957&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 8, + "aff": "Dep. de Llenguatges i Sistemes Inform\u00e0tics, Universitat d\u2019Alacant; Dep. de Llenguatges i Sistemes Inform\u00e0tics, Universitat d\u2019Alacant; Dep. de Llenguatges i Sistemes Inform\u00e0tics, Universitat d\u2019Alacant; Dep. 
de Llenguatges i Sistemes Inform\u00e0tics, Universitat d\u2019Alacant", + "aff_domain": "dlsi.ua.es;dlsi.ua.es;dlsi.ua.es;dlsi.ua.es", + "email": "dlsi.ua.es;dlsi.ua.es;dlsi.ua.es;dlsi.ua.es", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Universitat d'Alacant", + "aff_unique_dep": "Departament de Llenguatges i Sistemes Inform\u00e0tics", + "aff_unique_url": "https://www.ua.es", + "aff_unique_abbr": "UA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "2022.emnlp-main.467", + "title": "Cross-stitching Text and Knowledge Graph Encoders for Distantly Supervised Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Bi-encoder architectures for distantly-supervised relation extraction are designed to make use of the complementary information found in text and knowledge graphs (KG).However, current architectures suffer from two drawbacks. They either do not allow any sharing between the text encoder and the KG encoder at all, or, in case of models with KG-to-text attention, only share information in one direction. Here, we introduce cross-stitch bi-encoders, which allow full interaction between the text encoder and the KG encoder via a cross-stitch mechanism. The cross-stitch mechanism allows sharing and updating representations between the two encoders at any layer, with the amount of sharing being dynamically controlled via cross-attention-based gates. 
Experimental results on two relation extraction benchmarks from two different domains show that enabling full interaction between the two encoders yields strong improvements.", + "author": "Qin Dai; Benjamin Heinzerling; Kentaro Inui", + "authorids": "/q/qin-dai/; /b/benjamin-heinzerling/; /k/kentaro-inui/", + "bibtex": "@inproceedings{dai-etal-2022-cross,\n title = \"Cross-stitching Text and Knowledge Graph Encoders for Distantly Supervised Relation Extraction\",\n author = \"Dai, Qin and\n Heinzerling, Benjamin and\n Inui, Kentaro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.467/\",\n doi = \"10.18653/v1/2022.emnlp-main.467\",\n pages = \"6947--6958\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.467.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.467/", + "pdf_size": 694354, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=218745367493317144&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "Tohoku University; Tohoku University + RIKEN AIP; Tohoku University + RIKEN AIP", + "aff_domain": "tohoku.ac.jp;riken.jp;tohoku.ac.jp", + "email": "tohoku.ac.jp;riken.jp;tohoku.ac.jp", + "github": "https://github.com/cl-tohoku/xbe", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0+1", + "aff_unique_norm": "Tohoku University;RIKEN", + "aff_unique_dep": ";Advanced Institute for Computational Science", + "aff_unique_url": "https://www.tohoku.ac.jp;https://www.aip.riken.jp", + "aff_unique_abbr": "Tohoku U;RIKEN AIP", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0+0", + "aff_country_unique": "Japan" + }, + { + 
"id": "2022.findings-emnlp.263", + "title": "CrossRE: A Cross-Domain Dataset for Relation Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Relation Extraction (RE) has attracted increasing attention, but current RE evaluation is limited to in-domain evaluation setups. Little is known on how well a RE system fares in challenging, but realistic out-of-distribution evaluation setups. To address this gap, we propose CrossRE, a new, freely-available cross-domain benchmark for RE, which comprises six distinct text domains and includes multi-label annotations. An additional innovation is that we release meta-data collected during annotation, to include explanations and flags of difficult instances. We provide an empirical evaluation with a state-of-the-art model for relation classification. As the meta-data enables us to shed new light on the state-of-the-art model, we provide a comprehensive analysis on the impact of difficult cases and find correlations between model and human annotations. Overall, our empirical investigation highlights the difficulty of cross-domain RE. 
We release our dataset, to spur more research in this direction.", + "author": "Elisa Bassignana; Barbara Plank", + "authorids": "/e/elisa-bassignana/; /b/barbara-plank/", + "bibtex": "@inproceedings{bassignana-plank-2022-crossre,\n title = \"{C}ross{RE}: A Cross-Domain Dataset for Relation Extraction\",\n author = \"Bassignana, Elisa and\n Plank, Barbara\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.263/\",\n doi = \"10.18653/v1/2022.findings-emnlp.263\",\n pages = \"3592--3604\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.263.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.263/", + "pdf_size": 361725, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4993427541733111186&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, IT University of Copenhagen, Denmark+Center for Information and Language Processing (CIS), LMU Munich, Germany+Munich Center for Machine Learning (MCML), Munich, Germany; Center for Information and Language Processing (CIS), LMU Munich, Germany+Munich Center for Machine Learning (MCML), Munich, Germany", + "aff_domain": "itu.dk;cis.lmu.de", + "email": "itu.dk;cis.lmu.de", + "github": "https://github.com/mainlp/CrossRE", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1+2;1+2", + "aff_unique_norm": "IT University of Copenhagen;LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Department of Computer Science;Center for Information and Language Processing (CIS);", + "aff_unique_url": "https://itu.dk;https://www.lmu.de;", + "aff_unique_abbr": "ITU Copenhagen;LMU;MCML", + "aff_campus_unique_index": 
"1+1;1+1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0+1+1;1+1", + "aff_country_unique": "Denmark;Germany" + }, + { + "id": "2022.emnlp-main.45", + "title": "Crossmodal-3600: A Massively Multilingual Multimodal Evaluation Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Research in massively multilingual image captioning has been severely hampered by a lack of high-quality evaluation datasets. In this paper we present the Crossmodal-3600 dataset (XM3600 in short), a geographically diverse set of 3600 images annotated with human-generated reference captions in 36 languages. The images were selected from across the world, covering regions where the 36 languages are spoken, and annotated with captions that achieve consistency in terms of style across all languages, while avoiding annotation artifacts due to direct translation. We apply this benchmark to model selection for massively multilingual image captioning models, and show superior correlation results with human evaluations when using XM3600 as golden references for automatic metrics.", + "author": "Ashish V. Thapliyal; Jordi Pont Tuset; Xi Chen; Radu Soricut", + "authorids": "/a/ashish-v-thapliyal/; /j/jordi-pont-tuset/; /x/xi-chen/; /r/radu-soricut/", + "bibtex": "@inproceedings{thapliyal-etal-2022-crossmodal,\n title = \"Crossmodal-3600: A Massively Multilingual Multimodal Evaluation Dataset\",\n author = \"Thapliyal, Ashish V. 
and\n Pont Tuset, Jordi and\n Chen, Xi and\n Soricut, Radu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.45/\",\n doi = \"10.18653/v1/2022.emnlp-main.45\",\n pages = \"715--729\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.45.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.45/", + "pdf_size": 1378625, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12327280884250713111&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.55", + "title": "Curriculum Knowledge Distillation for Emoji-supervised Cross-lingual Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing sentiment analysis models have achieved great advances with the help of sufficient sentiment annotations. Unfortunately, many languages do not have sufficient sentiment corpus. To this end, recent studies have proposed cross-lingual sentiment analysis to transfer sentiment analysis models from resource-rich languages to low-resource languages. 
However, these studies either rely on external cross-lingual supervision (e.g., parallel corpora and translation model), or are limited by the cross-lingual gaps. In this work, based on the intuitive assumption that the relationships between emojis and sentiments are consistent across different languages, we investigate transferring sentiment knowledge across languages with the help of emojis. To this end, we propose a novel cross-lingual sentiment analysis approach dubbed Curriculum Knowledge Distiller (CKD). The core idea of CKD is to use emojis to bridge the source and target languages. Note that, compared with texts, emojis are more transferable, but cannot reveal the precise sentiment. Thus, we distill multiple Intermediate Sentiment Classifiers (ISC) on source language corpus with emojis to get ISCs with different attention weights of texts. To transfer them into the target language, we distill ISCs into the Target Language Sentiment Classifier (TSC) following the curriculum learning mechanism. In this way, TSC can learn delicate sentiment knowledge, meanwhile, avoid being affected by cross-lingual gaps. 
Experimental results on five cross-lingual benchmarks clearly verify the effectiveness of our approach.", + "author": "Jianyang Zhang; Tao Liang; Mingyang Wan; Guowu Yang; Fengmao Lv", + "authorids": "/j/jianyang-zhang/; /t/tao-liang/; /m/mingyang-wan/; /g/guowu-yang/; /f/fengmao-lv/", + "bibtex": "@inproceedings{zhang-etal-2022-curriculum,\n title = \"Curriculum Knowledge Distillation for Emoji-supervised Cross-lingual Sentiment Analysis\",\n author = \"Zhang, Jianyang and\n Liang, Tao and\n Wan, Mingyang and\n Yang, Guowu and\n Lv, Fengmao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.55/\",\n doi = \"10.18653/v1/2022.emnlp-main.55\",\n pages = \"864--875\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.55.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.55/", + "pdf_size": 452702, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8490037262158518911&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of Electronic Science and Technology of China+Bytedance; Bytedance; Bytedance; University of Electronic Science and Technology of China; Southwest Jiaotong University", + "aff_domain": "std.uestc.edu.cn;126.com;bytedance.com;uestc.edu.cn;126.com", + "email": "std.uestc.edu.cn;126.com;bytedance.com;uestc.edu.cn;126.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;0;2", + "aff_unique_norm": "University of Electronic Science and Technology of China;Bytedance;Southwest Jiaotong University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uestc.edu.cn;https://www.bytedance.com;https://www.swjtu.edu.cn", + 
"aff_unique_abbr": "UESTC;Bytedance;SWJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.209", + "title": "Curriculum Learning Meets Weakly Supervised Multimodal Correlation Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In the field of multimodal sentiment analysis (MSA), a few studies have leveraged the inherent modality correlation information stored in samples for self-supervised learning. However, they feed the training pairs in a random order without consideration of difficulty. Without human annotation, the generated training pairs of self-supervised learning often contain noise. If noisy or hard pairs are used for training at the easy stage, the model might be stuck in bad local optimum. In this paper, we inject curriculum learning into weakly supervised multimodal correlation learning. The weakly supervised correlation learning leverages the label information to generate scores for negative pairs to learn a more discriminative embedding space, where negative pairs are defined as two unimodal embeddings from different samples. To assist the correlation learning, we feed the training pairs to the model according to difficulty by the proposed curriculum learning, which consists of elaborately designed scoring and feeding functions. The scoring function computes the difficulty of pairs using pre-trained and current correlation predictors, where the pairs with large losses are defined as hard pairs. Notably, the hardest pairs are discarded in our algorithm, which are assumed as noisy pairs. Moreover, the feeding function takes the difference of correlation losses as feedback to determine the feeding actions (\u2018stay\u2019, \u2018step back\u2019, or \u2018step forward\u2019). 
The proposed method reaches state-of-the-art performance on MSA.", + "author": "Sijie Mai; Ya Sun; Haifeng Hu", + "authorids": "/s/sijie-mai/; /y/ya-sun/; /h/haifeng-hu/", + "bibtex": "@inproceedings{mai-etal-2022-curriculum,\n title = \"Curriculum Learning Meets Weakly Supervised Multimodal Correlation Learning\",\n author = \"Mai, Sijie and\n Sun, Ya and\n Hu, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.209/\",\n doi = \"10.18653/v1/2022.emnlp-main.209\",\n pages = \"3191--3203\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.209.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.209/", + "pdf_size": 1035236, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6469544694612332695&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "School of Electronics and Information Technology, Sun Yat-sen University; School of Electronics and Information Technology, Sun Yat-sen University; School of Electronics and Information Technology, Sun Yat-sen University", + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Sun Yat-sen University", + "aff_unique_dep": "School of Electronics and Information Technology", + "aff_unique_url": "http://www.sysu.edu.cn", + "aff_unique_abbr": "SYSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.72", + "title": "Curriculum Prompt Learning with 
Self-Training for Abstractive Dialogue Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Succinctly summarizing dialogue is a task of growing interest, but inherent challenges, such as insufficient training data and low information density impede our ability to train abstractive models. In this work, we propose a novel curriculum-based prompt learning method with self-training to address these problems. Specifically, prompts are learned using a curriculum learning strategy that gradually increases the degree of prompt perturbation, thereby improving the dialogue understanding and modeling capabilities of our model. Unlabeled dialogue is incorporated by means of self-training so as to reduce the dependency on labeled data. We further investigate topic-aware prompts to better plan for the generation of summaries. Experiments confirm that our model substantially outperforms strong baselines and achieves new state-of-the-art results on the AMI and ICSI datasets. 
Human evaluations also show the superiority of our model with regard to the summary generation quality.", + "author": "Changqun Li; Linlin Wang; Xin Lin; Gerard de Melo; Liang He", + "authorids": "/c/changqun-li/; /l/linlin-wang/; /x/xin-lin/; /g/gerard-de-melo/; /l/liang-he/", + "bibtex": "@inproceedings{li-etal-2022-curriculum,\n title = \"Curriculum Prompt Learning with Self-Training for Abstractive Dialogue Summarization\",\n author = \"Li, Changqun and\n Wang, Linlin and\n Lin, Xin and\n de Melo, Gerard and\n He, Liang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.72/\",\n doi = \"10.18653/v1/2022.emnlp-main.72\",\n pages = \"1096--1106\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.72.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.72/", + "pdf_size": 519159, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18081228719088746502&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "East China Normal University; East China Normal University; East China Normal University; Hasso Plattner Institute / University of Potsdam; East China Normal University", + "aff_domain": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;demelo.org", + "email": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;demelo.org", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "East China Normal University;Hasso Plattner Institute", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.hpi.de", + "aff_unique_abbr": "ECNU;HPI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "China;Germany" + }, + { + "id": "2022.emnlp-main.814", + "title": "CycleKQR: Unsupervised Bidirectional Keyword-Question Rewriting", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Users expect their queries to be answered by search systems, regardless of the query\u2019s surface form, which include keyword queries and natural questions. Natural Language Understanding (NLU) components of Search and QA systems may fail to correctly interpret semantically equivalent inputs if this deviates from how the system was trained, leading to suboptimal understanding capabilities. We propose the keyword-question rewriting task to improve query understanding capabilities of NLU systems for all surface forms. To achieve this, we present CycleKQR, an unsupervised approach, enabling effective rewriting between keyword and question queries using non-parallel data.Empirically we show the impact on QA performance of unfamiliar query forms for open domain and Knowledge Base QA systems (trained on either keywords or natural language questions). We demonstrate how CycleKQR significantly improves QA performance by rewriting queries into the appropriate form, while at the same time retaining the original semantic meaning of input queries, allowing CycleKQR to improve performance by up to 3% over supervised baselines. 
Finally, we release a dataset of 66k keyword-question pairs.", + "author": "Andrea Iovine; Anjie Fang; Besnik Fetahu; Jie Zhao; Oleg Rokhlenko; Shervin Malmasi", + "authorids": "/a/andrea-iovine/; /a/anjie-fang/; /b/besnik-fetahu/; /j/jie-zhao/; /o/oleg-rokhlenko/; /s/shervin-malmasi/", + "bibtex": "@inproceedings{iovine-etal-2022-cyclekqr,\n title = \"{C}ycle{KQR}: Unsupervised Bidirectional Keyword-Question Rewriting\",\n author = \"Iovine, Andrea and\n Fang, Anjie and\n Fetahu, Besnik and\n Zhao, Jie and\n Rokhlenko, Oleg and\n Malmasi, Shervin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.814/\",\n doi = \"10.18653/v1/2022.emnlp-main.814\",\n pages = \"11875--11886\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.814.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.814/", + "pdf_size": 541194, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8857775405374079929&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/amzn/kqr", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.156", + "title": "D4: a Chinese Dialogue Dataset for Depression-Diagnosis-Oriented Chat", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In a depression-diagnosis-directed clinical session, doctors initiate a conversation with ample emotional support that guides the patients to expose their symptoms based on clinical diagnosis criteria. 
Such a dialogue system is distinguished from existing single-purpose human-machine dialog systems, as it combines task-oriented and chit-chats with uniqueness in dialogue topics and procedures. However, due to the social stigma associated with mental illness, the dialogue data related to depression consultation and diagnosis are rarely disclosed. Based on clinical depression diagnostic criteria ICD-11 and DSM-5, we designed a 3-phase procedure to construct D4: a Chinese Dialogue Dataset for Depression-Diagnosis-Oriented Chat, which simulates the dialogue between doctors and patients during the diagnosis of depression, including diagnosis results and symptom summary given by professional psychiatrists for each conversation. Upon the newly-constructed dataset, four tasks mirroring the depression diagnosis process are established: response generation, topic prediction, dialog summary, and severity classification of depressive episode and suicide risk. Multi-scale evaluation results demonstrate that a more empathy-driven and diagnostic-accurate consultation dialogue system trained on our dataset can be achieved compared to rule-based bots.", + "author": "Binwei Yao; Chao Shi; Likai Zou; Lingfeng Dai; Mengyue Wu; Lu Chen; Zhen Wang; Kai Yu", + "authorids": "/b/binwei-yao/; /c/chao-shi/; /l/likai-zou/; /l/lingfeng-dai/; /m/mengyue-wu/; /l/lu-chen/; /z/zhen-wang/; /k/kai-yu/", + "bibtex": "@inproceedings{yao-etal-2022-d4,\n title = \"D4: a {C}hinese Dialogue Dataset for Depression-Diagnosis-Oriented Chat\",\n author = \"Yao, Binwei and\n Shi, Chao and\n Zou, Likai and\n Dai, Lingfeng and\n Wu, Mengyue and\n Chen, Lu and\n Wang, Zhen and\n Yu, Kai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url 
= \"https://aclanthology.org/2022.emnlp-main.156/\",\n doi = \"10.18653/v1/2022.emnlp-main.156\",\n pages = \"2438--2459\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.156.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.156/", + "pdf_size": 7186084, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12004798325660636069&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China; Shanghai Mental Health Center+Shanghai Jiao Tong University School of Medicine, Shanghai, China; SJTU X-LANCE Lab, Department of Computer Science and Engineering+MoE Key Lab of Artificial Intelligence, SJTU AI Institute+Shanghai Jiao Tong University, Shanghai, China", + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "github": 
"https://x-lance.github.io/D4", + "project": "", + "author_num": 8, + "aff_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0;0+0+0;0+0+0;1+0;0+0+0", + "aff_unique_norm": "Shanghai Jiao Tong University;Shanghai Mental Health Center", + "aff_unique_dep": "Department of Computer Science and Engineering;", + "aff_unique_url": "https://www.sjtu.edu.cn;http://www.smhc.org.cn", + "aff_unique_abbr": "SJTU;", + "aff_campus_unique_index": "1;1;1;1;1;1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0;0+0+0;0+0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.83", + "title": "DANLI: Deliberative Agent for Following Natural Language Instructions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent years have seen an increasing amount of work on embodied AI agents that can perform tasks by following human language instructions. However, most of these agents are reactive, meaning that they simply learn and imitate behaviors encountered in the training data. These reactive agents are insufficient for long-horizon complex tasks. To address this limitation, we propose a neuro-symbolic deliberative agent that, while following language instructions, proactively applies reasoning and planning based on its neural and symbolic representations acquired from past experience (e.g., natural language and egocentric vision). We show that our deliberative agent achieves greater than 70% improvement over reactive baselines on the challenging TEACh benchmark. Moreover, the underlying reasoning and planning processes, together with our modular framework, offer impressive transparency and explainability to the behaviors of the agent. This enables an in-depth understanding of the agent\u2019s capabilities, which shed light on challenges and opportunities for future embodied agents for instruction following. 
The code is available at https://github.com/sled-group/DANLI.", + "author": "Yichi Zhang; Jianing Yang; Jiayi Pan; Shane Storks; Nikhil Devraj; Ziqiao Ma; Keunwoo Yu; Yuwei Bao; Joyce Chai", + "authorids": "/y/yichi-zhang/; /j/jianing-yang/; /j/jiayi-pan/; /s/shane-storks/; /n/nikhil-devraj/; /z/ziqiao-ma/; /k/keunwoo-yu/; /y/yuwei-bao/; /j/joyce-chai/", + "bibtex": "@inproceedings{zhang-etal-2022-danli,\n title = \"{DANLI}: Deliberative Agent for Following Natural Language Instructions\",\n author = \"Zhang, Yichi and\n Yang, Jianing and\n Pan, Jiayi and\n Storks, Shane and\n Devraj, Nikhil and\n Ma, Ziqiao and\n Yu, Keunwoo and\n Bao, Yuwei and\n Chai, Joyce\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.83/\",\n doi = \"10.18653/v1/2022.emnlp-main.83\",\n pages = \"1280--1298\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.83.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.83/", + "pdf_size": 2205442, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2428854157130981732&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and Engineering Division, University of Michigan; Computer Science and 
Engineering Division, University of Michigan", + "aff_domain": "umich.edu; ; ; ; ; ; ; ; ", + "email": "umich.edu; ; ; ; ; ; ; ; ", + "github": "https://github.com/sled-group/DANLI", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "Computer Science and Engineering Division", + "aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.448", + "title": "DEER: Descriptive Knowledge Graph for Explaining Entity Relationships", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose DEER (Descriptive Knowledge Graph for Explaining Entity Relationships) - an open and informative form of modeling entity relationships. In DEER, relationships between entities are represented by free-text relation descriptions. For instance, the relationship between entities of machine learning and algorithm can be represented as \u201cMachine learning explores the study and construction of algorithms that can learn from and make predictions on data.\u201d To construct DEER, we propose a self-supervised learning method to extract relation descriptions with the analysis of dependency patterns and generate relation descriptions with a transformer-based relation description synthesizing model, where no human labeling is required. Experiments demonstrate that our system can extract and generate high-quality relation descriptions for explaining entity relationships. 
The results suggest that we can build an open and informative knowledge graph without human annotation.", + "author": "Jie Huang; Kerui Zhu; Kevin Chen-Chuan Chang; Jinjun Xiong; Wen-mei Hwu", + "authorids": "/j/jie-huang/; /k/kerui-zhu/; /k/kevin-chen-chuan-chang/; /j/jinjun-xiong/; /w/wen-mei-hwu/", + "bibtex": "@inproceedings{huang-etal-2022-deer,\n title = \"{DEER}: Descriptive Knowledge Graph for Explaining Entity Relationships\",\n author = \"Huang, Jie and\n Zhu, Kerui and\n Chang, Kevin Chen-Chuan and\n Xiong, Jinjun and\n Hwu, Wen-mei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.448/\",\n doi = \"10.18653/v1/2022.emnlp-main.448\",\n pages = \"6686--6698\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.448.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.448/", + "pdf_size": 596958, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12416464947293186433&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University at Buffalo; University of Illinois at Urbana-Champaign + NVIDIA", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;buffalo.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;buffalo.edu;illinois.edu", + "github": "https://github.com/jeffhj/DEER", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0+2", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;University at Buffalo;NVIDIA Corporation", + "aff_unique_dep": ";;", + "aff_unique_url": 
"https://illinois.edu;https://www.buffalo.edu;https://www.nvidia.com", + "aff_unique_abbr": "UIUC;UB;NVIDIA", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.649", + "title": "DEMETR: Diagnosing Evaluation Metrics for Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While machine translation evaluation metrics based on string overlap (e.g., BLEU) have their limitations, their computations are transparent: the BLEU score assigned to a particular candidate translation can be traced back to the presence or absence of certain words. The operations of newer learned metrics (e.g., BLEURT, COMET), which leverage pretrained language models to achieve higher correlations with human quality judgments than BLEU, are opaque in comparison. In this paper, we shed light on the behavior of these learned metrics by creating DEMETR, a diagnostic dataset with 31K English examples (translated from 10 source languages) for evaluating the sensitivity of MT evaluation metrics to 35 different linguistic perturbations spanning semantic, syntactic, and morphological error categories. All perturbations were carefully designed to form minimal pairs with the actual translation (i.e., differ in only one aspect). We find that learned metrics perform substantially better than string-based metrics on DEMETR. Additionally, learned metrics differ in their sensitivity to various phenomena (e.g., BERTScore is sensitive to untranslated words but relatively insensitive to gender manipulation, while COMET is much more sensitive to word repetition than to aspectual changes). 
We publicly release DEMETR to spur more informed future development of machine translation evaluation metrics", + "author": "Marzena Karpinska; Nishant Raj; Katherine Thai; Yixiao Song; Ankita Gupta; Mohit Iyyer", + "authorids": "/m/marzena-karpinska/; /n/nishant-raj/; /k/katherine-thai/; /y/yixiao-song/; /a/ankita-gupta/; /m/mohit-iyyer/", + "bibtex": "@inproceedings{karpinska-etal-2022-demetr,\n title = \"{DEMETR}: Diagnosing Evaluation Metrics for Translation\",\n author = \"Karpinska, Marzena and\n Raj, Nishant and\n Thai, Katherine and\n Song, Yixiao and\n Gupta, Ankita and\n Iyyer, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.649/\",\n doi = \"10.18653/v1/2022.emnlp-main.649\",\n pages = \"9540--9561\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.649.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.649/", + "pdf_size": 667533, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1957107748644519730&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Manning College of Information and Computer Sciences, UMass Amherst\u2662; Manning College of Information and Computer Sciences, UMass Amherst\u2662; Manning College of Information and Computer Sciences, UMass Amherst\u2662; Department of Linguistics, UMass Amherst\u2660; Manning College of Information and Computer Sciences, UMass Amherst\u2662; Manning College of Information and Computer Sciences, UMass Amherst\u2662", + "aff_domain": "cs.umass.edu;umass.edu;cs.umass.edu;umass.edu;cs.umass.edu;cs.umass.edu", + "email": "cs.umass.edu;umass.edu;cs.umass.edu;umass.edu;cs.umass.edu;cs.umass.edu", + "github": 
"https://github.com/marzenakrp/demetr", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "Manning College of Information and Computer Sciences", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.491", + "title": "DIGAT: Modeling News Recommendation with Dual-Graph Interaction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "News recommendation (NR) is essential for online news services. Existing NR methods typically adopt a news-user representation learning framework, facing two potential limitations. First, in news encoder, single candidate news encoding suffers from an insufficient semantic information problem. Second, existing graph-based NR methods are promising but lack effective news-user feature interaction, rendering the graph-based recommendation suboptimal. To overcome these limitations, we propose dual-interactive graph attention networks (DIGAT) consisting of news- and user-graph channels. In the news-graph channel, we enrich the semantics of single candidate news by incorporating the semantically relevant news information with a semantic-augmented graph (SAG). In the user-graph channel, multi-level user interests are represented with a news-topic graph. Most notably, we design a dual-graph interaction process to perform effective feature interaction between the news and user graphs, which facilitates accurate news-user representation matching. Experiment results on the benchmark dataset MIND show that DIGAT outperforms existing news recommendation methods. 
Further ablation studies and analyses validate the effectiveness of (1) semantic-augmented news graph modeling and (2) dual-graph interaction.", + "author": "Zhiming Mao; Jian Li; Hongru Wang; Xingshan Zeng; Kam-Fai Wong", + "authorids": "/z/zhiming-mao/; /j/jian-li/; /h/hongru-wang/; /x/xingshan-zeng/; /k/kam-fai-wong/", + "bibtex": "@inproceedings{mao-etal-2022-digat,\n title = \"{DIGAT}: Modeling News Recommendation with Dual-Graph Interaction\",\n author = \"Mao, Zhiming and\n Li, Jian and\n Wang, Hongru and\n Zeng, Xingshan and\n Wong, Kam-Fai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.491/\",\n doi = \"10.18653/v1/2022.findings-emnlp.491\",\n pages = \"6595--6607\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.491.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.491/", + "pdf_size": 3712019, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8236481350443000562&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China; Tencent, Shenzhen, China; The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China; ; The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China", + "aff_domain": "se.cuhk.edu.hk;se.cuhk.edu.hk;se.cuhk.edu.hk;gmail.com;gmail.com", + "email": "se.cuhk.edu.hk;se.cuhk.edu.hk;se.cuhk.edu.hk;gmail.com;gmail.com", + "github": "https://github.com/Veason-silverbullet/DIGAT", + "project": "", + "author_num": 5, + 
"aff_unique_index": "0+1;2;0+1;0+1", + "aff_unique_norm": "The Chinese University of Hong Kong;MoE Key Laboratory of High Confidence Software Technologies;Tencent", + "aff_unique_dep": ";High Confidence Software Technologies;", + "aff_unique_url": "https://www.cuhk.edu.hk;;https://www.tencent.com", + "aff_unique_abbr": "CUHK;;Tencent", + "aff_campus_unique_index": "0;2;0;0", + "aff_campus_unique": "Hong Kong;;Shenzhen", + "aff_country_unique_index": "0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.253", + "title": "DORE: Document Ordered Relation Extraction based on Generative Framework", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In recent years, there is a surge of generation-based information extraction work, which allows a more direct use of pre-trained language models and efficiently captures output dependencies. However, previous generative methods using lexical representation do not naturally fit document-level relation extraction (DocRE) where there are multiple entities and relational facts. In this paper, we investigate the root cause of the underwhelming performance of the existing generative DocRE models and discover that the culprit is the inadequacy of the training paradigm, instead of the capacities of the models. We propose to generate a symbolic and ordered sequence from the relation matrix which is deterministic and easier for model to learn. Moreover, we design a parallel row generation method to process overlong target sequences. Besides, we introduce several negative sampling strategies to improve the performance with balanced signals. 
Experimental results on four datasets show that our proposed method can improve the performance of the generative DocRE models.", + "author": "Qipeng Guo; Yuqing Yang; Hang Yan; Xipeng Qiu; Zheng Zhang", + "authorids": "/q/qipeng-guo/; /y/yuqing-yang/; /h/hang-yan/; /x/xipeng-qiu/; /z/zheng-zhang/", + "bibtex": "@inproceedings{guo-etal-2022-dore,\n title = \"{DORE}: Document Ordered Relation Extraction based on Generative Framework\",\n author = \"Guo, Qipeng and\n Yang, Yuqing and\n Yan, Hang and\n Qiu, Xipeng and\n Zhang, Zheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.253/\",\n doi = \"10.18653/v1/2022.findings-emnlp.253\",\n pages = \"3463--3474\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.253.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.253/", + "pdf_size": 482212, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15925306554257951936&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Amazon AWS AI; School of Computer Science, Fudan University + Amazon Shanghai AI Lab; School of Computer Science, Fudan University; School of Computer Science, Fudan University; Amazon AWS AI", + "aff_domain": "amazon.com;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;amazon.com", + "email": "amazon.com;m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;amazon.com", + "github": "https://github.com/ayyyq/DORE", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+0;1;1;0", + "aff_unique_norm": "Amazon;Fudan University", + "aff_unique_dep": "Amazon Web Services AI;School of Computer Science", + "aff_unique_url": "https://aws.amazon.com;https://www.fudan.edu.cn", + "aff_unique_abbr": 
"AWS;Fudan", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;1+1;1;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.354", + "title": "DOROTHIE: Spoken Dialogue for Handling Unexpected Situations in Interactive Autonomous Driving Agents", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In the real world, autonomous driving agents navigate in highly dynamic environments full of unexpected situations where pre-trained models are unreliable. In these situations, what is immediately available to vehicles is often only human operators. Empowering autonomous driving agents with the ability to navigate in a continuous and dynamic environment and to communicate with humans through sensorimotor-grounded dialogue becomes critical. To this end, we introduce Dialogue On the ROad To Handle Irregular Events (DOROTHIE), a novel interactive simulation platform that enables the creation of unexpected situations on the fly to support empirical studies on situated communication with autonomous driving agents. Based on this platform, we created the Situated Dialogue Navigation (SDN), a navigation benchmark of 183 trials with a total of 8415 utterances, around 18.7 hours of control streams, and 2.9 hours of trimmed audio. SDN is developed to evaluate the agent\u2019s ability to predict dialogue moves from humans as well as generate its own dialogue moves and physical navigation actions. We further developed a transformer-based baseline model for these SDN tasks. Our empirical results indicate that language guided-navigation in a highly dynamic environment is an extremely difficult task for end-to-end models. 
These results will provide insight towards future work on robust autonomous driving agents", + "author": "Ziqiao Ma; Benjamin VanDerPloeg; Cristian-Paul Bara; Yidong Huang; Eui-In Kim; Felix Gervits; Matthew Marge; Joyce Chai", + "authorids": "/z/ziqiao-ma/; /b/benjamin-vanderploeg/; /c/cristian-paul-bara/; /y/yidong-huang/; /e/eui-in-kim/; /f/felix-gervits/; /m/matthew-marge/; /j/joyce-chai/", + "bibtex": "@inproceedings{ma-etal-2022-dorothie,\n title = \"{DOROTHIE}: Spoken Dialogue for Handling Unexpected Situations in Interactive Autonomous Driving Agents\",\n author = \"Ma, Ziqiao and\n VanDerPloeg, Benjamin and\n Bara, Cristian-Paul and\n Huang, Yidong and\n Kim, Eui-In and\n Gervits, Felix and\n Marge, Matthew and\n Chai, Joyce\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.354/\",\n doi = \"10.18653/v1/2022.findings-emnlp.354\",\n pages = \"4800--4822\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.354.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.354/", + "pdf_size": 7566599, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16265824796697372627&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of Michigan; University of Michigan; University of Michigan+Amazon Alexa AI; University of Michigan; University of Michigan; U.S. Army Research Laboratory; U.S. 
Army Research Laboratory; University of Michigan", + "aff_domain": "umich.edu;umich.edu;umich.edu;umich.edu;umich.edu;army.mil;army.mil;umich.edu", + "email": "umich.edu;umich.edu;umich.edu;umich.edu;umich.edu;army.mil;army.mil;umich.edu", + "github": "https://github.com/sled-group/DOROTHIE", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0+1;0;0;2;2;0", + "aff_unique_norm": "University of Michigan;Amazon;U.S. Army Research Laboratory", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.umich.edu;https://www.amazon.com;https://www.arl.army.mil", + "aff_unique_abbr": "UM;Amazon;ARL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.342", + "title": "DRLK: Dynamic Hierarchical Reasoning with Language Model and Knowledge Graph for Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In recent years, Graph Neural Network (GNN) approaches with enhanced knowledge graphs (KG) perform well in question answering (QA) tasks. One critical challenge is how to effectively utilize interactions between the QA context and KG. However, existing work only adopts the identical QA context representation to interact with multiple layers of KG, which results in a restricted interaction. In this paper, we propose DRLK (Dynamic Hierarchical Reasoning with Language Model and Knowledge Graphs), a novel model that utilizes dynamic hierarchical interactions between the QA context and KG for reasoning. DRLK extracts dynamic hierarchical features in the QA context, and performs inter-layer and intra-layer interactions on each iteration, allowing the KG representation to be grounded with the hierarchical features of the QA context. We conduct extensive experiments on four benchmark datasets in medical QA and commonsense reasoning. 
The experimental results demonstrate that DRLK achieves state-of-the-art performances on two benchmark datasets and performs competitively on the others.", + "author": "Miao Zhang; Rufeng Dai; Ming Dong; Tingting He", + "authorids": "/m/miao-zhang/; /r/rufeng-dai/; /m/ming-dong/; /t/tingting-he/", + "bibtex": "@inproceedings{zhang-etal-2022-drlk,\n title = \"{DRLK}: Dynamic Hierarchical Reasoning with Language Model and Knowledge Graph for Question Answering\",\n author = \"Zhang, Miao and\n Dai, Rufeng and\n Dong, Ming and\n He, Tingting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.342/\",\n doi = \"10.18653/v1/2022.emnlp-main.342\",\n pages = \"5123--5133\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.342.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.342/", + "pdf_size": 491946, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13184028338617622887&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "National Engineering Research Center for E-Learning + Hubei Provincial Key Laboratory of Arti\ufb01cial Intelligence and Smart Learning + National Language Resources Monitoring and Research Center for Network Media + School of Computer, Central China Normal University; National Engineering Research Center for E-Learning + Hubei Provincial Key Laboratory of Arti\ufb01cial Intelligence and Smart Learning + National Language Resources Monitoring and Research Center for Network Media + School of Computer, Central China Normal University; National Engineering Research Center for E-Learning + Hubei Provincial Key Laboratory of Arti\ufb01cial Intelligence and Smart Learning + 
National Language Resources Monitoring and Research Center for Network Media + School of Computer, Central China Normal University; National Engineering Research Center for E-Learning + Hubei Provincial Key Laboratory of Arti\ufb01cial Intelligence and Smart Learning + National Language Resources Monitoring and Research Center for Network Media + School of Computer, Central China Normal University", + "aff_domain": "mails.ccnu.edu.cn;mails.ccnu.edu.cn;ccnu.edu.cn;ccnu.edu.cn", + "email": "mails.ccnu.edu.cn;mails.ccnu.edu.cn;ccnu.edu.cn;ccnu.edu.cn", + "github": "https://github.com/MZ-MiaoZhang/DRLK", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+2+3;0+1+2+3;0+1+2+3;0+1+2+3", + "aff_unique_norm": "National Engineering Research Center for E-Learning;Hubei Provincial Key Laboratory of Artificial Intelligence and Smart Learning;National Language Resources Monitoring and Research Center;Central China Normal University", + "aff_unique_dep": ";Artificial Intelligence and Smart Learning;Center for Network Media;School of Computer", + "aff_unique_url": ";;;http://www.ccnu.edu.cn", + "aff_unique_abbr": ";;;CCNU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0+0;0+0+0+0;0+0+0+0;0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.281", + "title": "DSM: Question Generation over Knowledge Base via Modeling Diverse Subgraphs with Meta-learner", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing methods on knowledge base question generation (KBQG) learn a one-size-fits-all model by training together all subgraphs without distinguishing the diverse semantics of subgraphs. In this work, we show that making use of the past experience on semantically similar subgraphs can reduce the learning difficulty and promote the performance of KBQG models. To achieve this, we propose a novel approach to model diverse subgraphs with meta-learner (DSM). 
Specifically, we devise a graph contrastive learning-based retriever to identify semantically similar subgraphs, so that we can construct the semantics-aware learning tasks for the meta-learner to learn semantics-specific and semantics-agnostic knowledge on and across these tasks. Extensive experiments on two widely-adopted benchmarks for KBQG show that DSM derives new state-of-the-art performance and benefits the question answering tasks as a means of data augmentation.", + "author": "Shasha Guo; Jing Zhang; Yanling Wang; Qianyi Zhang; Cuiping Li; Hong Chen", + "authorids": "/s/shasha-guo/; /j/jing-zhang/; /y/yanling-wang/; /q/qianyi-zhang/; /c/cuiping-li/; /h/hong-chen/", + "bibtex": "@inproceedings{guo-etal-2022-dsm,\n title = \"{DSM}: Question Generation over Knowledge Base via Modeling Diverse Subgraphs with Meta-learner\",\n author = \"Guo, Shasha and\n Zhang, Jing and\n Wang, Yanling and\n Zhang, Qianyi and\n Li, Cuiping and\n Chen, Hong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.281/\",\n doi = \"10.18653/v1/2022.emnlp-main.281\",\n pages = \"4194--4207\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.281.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.281/", + "pdf_size": 678108, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14281259416148116757&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 2, + "aff": "School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and 
Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/RUCKBReasoning/DSM", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;0+1;0+1;0+1", + "aff_unique_norm": "Renmin University of China;Ministry of Education", + "aff_unique_dep": "School of Information;Key Laboratory of Data Engineering and Knowledge Engineering", + "aff_unique_url": "http://www.ruc.edu.cn;", + "aff_unique_abbr": "RUC;", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.410", + "title": "Data Cartography for Low-Resource Neural Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While collecting or generating more parallel data is necessary to improve machine translation (MT) in low-resource settings, we lack an understanding of how the limited amounts of existing data are actually used to help guide the collection of further resources. 
In this paper, we apply data cartography techniques (Swayamdipta et al., 2020) to characterize the contribution of training samples in two low-resource MT tasks (Swahili-English and Turkish-English) throughout the training of standard neural MT models. Our empirical study shows that, unlike in prior work for classification tasks, most samples contribute to model training in low-resource MT, albeit not uniformly throughout the training process. Furthermore, uni-dimensional characterizations of samples \u2013 e.g., based on dual cross-entropy or word frequency \u2013 do not suffice to characterize to what degree they are hard or easy to learn. Taken together, our results suggest that data augmentation strategies for low-resource MT would benefit from model-in-the-loop strategies to maximize improvements.", + "author": "Aquia Richburg; Marine Carpuat", + "authorids": "/a/aquia-richburg/; /m/marine-carpuat/", + "bibtex": "@inproceedings{richburg-carpuat-2022-data,\n title = \"Data Cartography for Low-Resource Neural Machine Translation\",\n author = \"Richburg, Aquia and\n Carpuat, Marine\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.410/\",\n doi = \"10.18653/v1/2022.findings-emnlp.410\",\n pages = \"5594--5607\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.410.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.410/", + "pdf_size": 975993, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9708992227309547546&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "AMSC, University of Maryland; Computer Science & UMIACS, University of Maryland", + "aff_domain": "umd.edu;umd.edu", + 
"email": "umd.edu;umd.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "AMSC", + "aff_unique_url": "https://www.umd.edu", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.113", + "title": "Data Selection Curriculum for Neural Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural Machine Translation (NMT) models are typically trained on heterogeneous data that are concatenated and randomly shuffled. However, not all of the training data are equally useful to the model. Curriculum training aims to present the data to the NMT models in a meaningful order. In this work, we introduce a two-stage training framework for NMT where we fine-tune a base NMT model on subsets of data, selected by both deterministic scoring using pre-trained methods and online scoring that considers prediction scores of the emerging NMT model. 
Through comprehensive experiments on six language pairs comprising low- and high-resource languages from WMT\u201921, we have shown that our curriculum strategies consistently demonstrate better quality (up to +2.2 BLEU improvement) and faster convergence (approximately 50% fewer updates).", + "author": "Tasnim Mohiuddin; Philipp Koehn; Vishrav Chaudhary; James Cross; Shruti Bhosale; Shafiq Joty", + "authorids": "/m/muhammad-tasnim-mohiuddin/; /p/philipp-koehn/; /v/vishrav-chaudhary/; /j/james-cross/; /s/shruti-bhosale/; /s/shafiq-joty/", + "bibtex": "@inproceedings{mohiuddin-etal-2022-data,\n title = \"Data Selection Curriculum for Neural Machine Translation\",\n author = \"Mohiuddin, Tasnim and\n Koehn, Philipp and\n Chaudhary, Vishrav and\n Cross, James and\n Bhosale, Shruti and\n Joty, Shafiq\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.113/\",\n doi = \"10.18653/v1/2022.findings-emnlp.113\",\n pages = \"1569--1582\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.113.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.113/", + "pdf_size": 946469, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16728158044507947285&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Nanyang Technological University+Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Nanyang Technological University+Meta AI", + "aff_domain": "ntu.edu.sg;fb.com;fb.com;fb.com;fb.com;ntu.edu.sg", + "email": "ntu.edu.sg;fb.com;fb.com;fb.com;fb.com;ntu.edu.sg", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;1;1;0+1", + "aff_unique_norm": "Nanyang Technological University;Meta Platforms, 
Inc.", + "aff_unique_dep": ";Meta AI", + "aff_unique_url": "https://www.ntu.edu.sg;https://meta.com", + "aff_unique_abbr": "NTU;Meta", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;1;1;1;0+1", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "2022.findings-emnlp.433", + "title": "Data-Efficient Concept Extraction from Pre-trained Language Models for Commonsense Explanation Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Predicting the key explanation concept is essential for generating commonsense explanations. This paper introduces a method to predict the concept from pre-trained language models for commonsense explanation generation. Our experiment found that adopting a language model as the concept extractor and fine-tuning it with 20% training data can improve the quality and accuracy of the generated explanations over multiple evaluation metrics. Compared with conventional methods that search concepts over knowledge graphs, our method does not require the preparation and training models to search through knowledge graphs. To better understand the results from pre-trained language models, we also designed a metric to evaluate the retrieved concepts. 
Through analysis and experiments, we show the correlation between this metric and the performance of the generators, and we also show the importance of attaching concepts for generating high-quality sentences.", + "author": "Yanbo Fang; Yongfeng Zhang", + "authorids": "/y/yanbo-fang/; /y/yongfeng-zhang/", + "bibtex": "@inproceedings{fang-zhang-2022-data,\n title = \"Data-Efficient Concept Extraction from Pre-trained Language Models for Commonsense Explanation Generation\",\n author = \"Fang, Yanbo and\n Zhang, Yongfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.433/\",\n doi = \"10.18653/v1/2022.findings-emnlp.433\",\n pages = \"5883--5893\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.433.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.433/", + "pdf_size": 488204, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4088176099518677684&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 8, + "aff": "Department of Computer Science, Rutgers University, New Brunswick, NJ, US; Department of Computer Science, Rutgers University, New Brunswick, NJ, US", + "aff_domain": "rutgers.edu;rutgers.edu", + "email": "rutgers.edu;rutgers.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Rutgers University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.rutgers.edu", + "aff_unique_abbr": "Rutgers", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "New Brunswick", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.784", + "title": 
"Data-Efficient Playlist Captioning With Musical and Linguistic Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Music streaming services feature billions of playlists created by users, professional editors or algorithms. In this content overload scenario, it is crucial to characterise playlists, so that music can be effectively organised and accessed. Playlist titles and descriptions are proposed in natural language either manually by music editors and users or automatically from pre-defined templates. However, the former is time-consuming while the latter is limited by the vocabulary and covered music themes. In this work, we propose PlayNTell, a data-efficient multi-modal encoder-decoder model for automatic playlist captioning. Compared to existing music captioning algorithms, PlayNTell leverages also linguistic and musical knowledge to generate correct and thematic captions. We benchmark PlayNTell on a new editorial playlists dataset collected from two major music streaming services.PlayNTell yields 2x-3x higher BLEU@4 and CIDEr than state of the art captioning algorithms.", + "author": "Giovanni Gabbolini; Romain Hennequin; Elena Epure", + "authorids": "/g/giovanni-gabbolini/; /r/romain-hennequin/; /e/elena-epure/", + "bibtex": "@inproceedings{gabbolini-etal-2022-data,\n title = \"Data-Efficient Playlist Captioning With Musical and Linguistic Knowledge\",\n author = \"Gabbolini, Giovanni and\n Hennequin, Romain and\n Epure, Elena\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.784/\",\n doi = \"10.18653/v1/2022.emnlp-main.784\",\n pages = \"11401--11415\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.784.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.784/", + "pdf_size": 1202520, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11615779246009633356&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Insight Centre for Data Analytics, School of Computer Science & IT, University College Cork, Ireland; Deezer Research, Paris, France; Deezer Research, Paris, France", + "aff_domain": "insight-centre.org;deezer.com;deezer.com", + "email": "insight-centre.org;deezer.com;deezer.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University College Cork;Deezer Research", + "aff_unique_dep": "School of Computer Science & IT;Research", + "aff_unique_url": "https://www.ucc.ie;https://www.deezer.com", + "aff_unique_abbr": "UCC;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Paris", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Ireland;France" + }, + { + "id": "2022.emnlp-main.383", + "title": "Data-Efficient Strategies for Expanding Hate Speech Detection into Under-Resourced Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Hate speech is a global phenomenon, but most hate speech datasets so far focus on English-language content. This hinders the development of more effective hate speech detection models in hundreds of languages spoken by billions across the world. More data is needed, but annotating hateful content is expensive, time-consuming and potentially harmful to annotators. To mitigate these issues, we explore data-efficient strategies for expanding hate speech detection into under-resourced languages. 
In a series of experiments with mono- and multilingual models across five non-English languages, we find that 1) a small amount of target-language fine-tuning data is needed to achieve strong performance, 2) the benefits of using more such data decrease exponentially, and 3) initial fine-tuning on readily-available English data can partially substitute target-language data and improve model generalisability. Based on these findings, we formulate actionable recommendations for hate speech detection in low-resource language settings.", + "author": "Paul R\u00f6ttger; Debora Nozza; Federico Bianchi; Dirk Hovy", + "authorids": "/p/paul-rottger/; /d/debora-nozza/; /f/federico-bianchi/; /d/dirk-hovy/", + "bibtex": "@inproceedings{rottger-etal-2022-data,\n title = \"Data-Efficient Strategies for Expanding Hate Speech Detection into Under-Resourced Languages\",\n author = {R{\\\"o}ttger, Paul and\n Nozza, Debora and\n Bianchi, Federico and\n Hovy, Dirk},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.383/\",\n doi = \"10.18653/v1/2022.emnlp-main.383\",\n pages = \"5674--5691\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.383.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.383/", + "pdf_size": 1755282, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18038182660544256552&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.596", + "title": "Dealing with Abbreviations in the Slovenian Biographical Lexicon", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "Abbreviations present a significant challenge for NLP systems because they cause tokenization and out-of-vocabulary errors. They can also make the text less readable, especially in reference printed books, where they are extensively used. Abbreviations are especially problematic in low-resource settings, where systems are less robust to begin with. In this paper, we propose a new method for addressing the problems caused by a high density of domain-specific abbreviations in a text. We apply this method to the case of a Slovenian biographical lexicon and evaluate it on a newly developed gold-standard dataset of 51 Slovenian biographies. Our abbreviation identification method performs significantly better than commonly used ad-hoc solutions, especially at identifying unseen abbreviations. We also propose and present the results of a method for expanding the identified abbreviations in context.", + "author": "Angel Daza; Antske Fokkens; Toma\u017e Erjavec", + "authorids": "/a/angel-daza/; /a/antske-fokkens/; /t/tomaz-erjavec/", + "bibtex": "@inproceedings{daza-etal-2022-dealing,\n title = \"Dealing with Abbreviations in the {S}lovenian Biographical Lexicon\",\n author = \"Daza, Angel and\n Fokkens, Antske and\n Erjavec, Toma{\\v{z}}\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.596/\",\n doi = \"10.18653/v1/2022.emnlp-main.596\",\n pages = \"8715--8720\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.596.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.596/", + "pdf_size": 240577, + "gs_citation": 1, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7429183367718067767&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "CLTL, Vrije Universiteit Amsterdam; CLTL, Vrije Universiteit Amsterdam; Department of Knowledge Technologies, Jo\u017eef Stefan Institute", + "aff_domain": "vu.nl;vu.nl;ijs.si", + "email": "vu.nl;vu.nl;ijs.si", + "github": "https://github.com/angel-daza/abbreviation-detector8715", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Vrije Universiteit Amsterdam;Jo\u017eef Stefan Institute", + "aff_unique_dep": "CLTL;Department of Knowledge Technologies", + "aff_unique_url": "https://www.vu.nl;https://www.ijs.si", + "aff_unique_abbr": "VU Amsterdam;JSI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Netherlands;Slovenia" + }, + { + "id": "2022.findings-emnlp.213", + "title": "DebiasGAN: Eliminating Position Bias in News Recommendation with Adversarial Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Click behaviors are widely used for learning news recommendation models, but they are heavily affected by the biases brought by the news display positions. It is important to remove position biases to train unbiased recommendation model and capture unbiased user interest. In this paper, we propose a news recommendation method named DebiasGAN that can effectively alleviate position biases via adversarial learning. The core idea is modeling the personalized effect of position bias on click behaviors in a candidate-aware way, and learning debiased candidate-aware user embeddings from which the position information cannot be discriminated. More specifically, we use a bias-aware click model to capture the effect of position bias on click behaviors, and use a bias-invariant click model with random candidate positions to estimate the ideally unbiased click scores. 
We apply adversarial learning to the embeddings learned by the two models to help the bias-invariant click model capture debiased user interest. Experimental results on two real-world datasets show that DebiasGAN effectively improves news recommendation by eliminating position biases.", + "author": "Chuhan Wu; Fangzhao Wu; Xiangnan He; Yongfeng Huang", + "authorids": "/c/chuhan-wu/; /f/fangzhao-wu/; /x/xiangnan-he/; /y/yongfeng-huang/", + "bibtex": "@inproceedings{wu-etal-2022-debiasgan,\n title = \"{D}ebias{GAN}: Eliminating Position Bias in News Recommendation with Adversarial Learning\",\n author = \"Wu, Chuhan and\n Wu, Fangzhao and\n He, Xiangnan and\n Huang, Yongfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.213/\",\n doi = \"10.18653/v1/2022.findings-emnlp.213\",\n pages = \"2933--2938\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.213.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.213/", + "pdf_size": 560881, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12275075414510729753&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Electronic Engineering, Tsinghua University; Microsoft Research Asia; University of Science and Technology of China; Zhongguancun Laboratory + Department of Electronic Engineering, Tsinghua University", + "aff_domain": "gmail.com;gmail.com;gmail.com;tsinghua.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;tsinghua.edu.cn", + "github": "https://github.com/wuch15/DebiasGAN", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3+0", + "aff_unique_norm": "Tsinghua University;Microsoft Research;University of 
Science and Technology of China;Zhongguancun Laboratory", + "aff_unique_dep": "Department of Electronic Engineering;Research;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.microsoft.com/en-us/research/group/asia;http://www.ustc.edu.cn;", + "aff_unique_abbr": "THU;MSR Asia;USTC;", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.517", + "title": "Debiasing Masks: A New Framework for Shortcut Mitigation in NLU", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Debiasing language models from unwanted behaviors in Natural Language Understanding (NLU) tasks is a topic with rapidly increasing interest in the NLP community. Spurious statistical correlations in the data allow models to perform shortcuts and avoid uncovering more advanced and desirable linguistic features.A multitude of effective debiasing approaches has been proposed, but flexibility remains a major issue. For the most part, models must be retrained to find a new set of weights with debiased behavior.We propose a new debiasing method in which we identify debiased pruning masks that can be applied to a finetuned model. This enables the selective and conditional application of debiasing behaviors.We assume that bias is caused by a certain subset of weights in the network; our method is, in essence, a mask search to identify and remove biased weights.Our masks show equivalent or superior performance to the standard counterparts, while offering important benefits.Pruning masks can be stored with high efficiency in memory, and it becomes possible to switch among several debiasing behaviors (or revert back to the original biased model) at inference time. Finally, it opens the doors to further research on how biases are acquired by studying the generated masks. 
For example, we observed that the early layers and attention heads were pruned more aggressively, possibly hinting towards the location in which biases may be encoded.", + "author": "Johannes Mario Meissner; Saku Sugawara; Akiko Aizawa", + "authorids": "/j/johannes-mario-meissner/; /s/saku-sugawara/; /a/akiko-aizawa/", + "bibtex": "@inproceedings{meissner-etal-2022-debiasing,\n title = \"Debiasing Masks: A New Framework for Shortcut Mitigation in {NLU}\",\n author = \"Meissner, Johannes Mario and\n Sugawara, Saku and\n Aizawa, Akiko\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.517/\",\n doi = \"10.18653/v1/2022.emnlp-main.517\",\n pages = \"7607--7613\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.517.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.517/", + "pdf_size": 163924, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1878513488582539939&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "The University of Tokyo; National Institute of Informatics; The University of Tokyo + National Institute of Informatics", + "aff_domain": "nii.ac.jp;nii.ac.jp;nii.ac.jp", + "email": "nii.ac.jp;nii.ac.jp;nii.ac.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "University of Tokyo;National Institute of Informatics", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.nii.ac.jp/", + "aff_unique_abbr": "UTokyo;NII", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.651", + 
"title": "Debiasing Pretrained Text Encoders by Paying Attention to Paying Attention", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Natural Language Processing (NLP) models are found to exhibit discriminatory stereotypes across many social constructs, e.g. gender and race. In comparison to the progress made in reducing bias from static word embeddings, fairness in sentence-level text encoders received little consideration despite their wider applicability in contemporary NLP tasks. In this paper, we propose a debiasing method for pre-trained text encoders that both reduces social stereotypes, and inflicts next to no semantic damage. Unlike previous studies that directly manipulate the embeddings, we suggest to dive deeper into the operation of these encoders, and pay more attention to the way they pay attention to different social groups. We find that stereotypes are also encoded in the attention layer. Then, we work on model debiasing by redistributing the attention scores of a text encoder such that it forgets any preference to historically advantaged groups, and attends to all social classes with the same intensity. 
Our experiments confirm that reducing bias from attention effectively mitigates it from the model\u2019s text representations.", + "author": "Yacine Gaci; Boualem Benatallah; Fabio Casati; Khalid Benabdeslem", + "authorids": "/y/yacine-gaci/; /b/boualem-benatallah/; /f/fabio-casati/; /k/khalid-benabdeslem/", + "bibtex": "@inproceedings{gaci-etal-2022-debiasing,\n title = \"Debiasing Pretrained Text Encoders by Paying Attention to Paying Attention\",\n author = \"Gaci, Yacine and\n Benatallah, Boualem and\n Casati, Fabio and\n Benabdeslem, Khalid\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.651/\",\n doi = \"10.18653/v1/2022.emnlp-main.651\",\n pages = \"9582--9602\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.651.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.651/", + "pdf_size": 772001, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5939036739585977143&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "LIRIS - University of Lyon 1, France; Dublin City University, Ireland+UNSW, Sydney, Australia; ServiceNow, USA; LIRIS - University of Lyon 1, France", + "aff_domain": "univ-lyon1.fr;univ-lyon1.fr;gmail.com;gmail.com", + "email": "univ-lyon1.fr;univ-lyon1.fr;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;0", + "aff_unique_norm": "University of Lyon 1;Dublin City University;University of New South Wales;ServiceNow", + "aff_unique_dep": "LIRIS;;;", + "aff_unique_url": "https://www.univ-lyon1.fr;https://www.dcu.ie;https://www.unsw.edu.au;https://www.servicenow.com", + "aff_unique_abbr": ";DCU;UNSW;ServiceNow", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Sydney", + "aff_country_unique_index": "0;1+2;3;0", + "aff_country_unique": "France;Ireland;Australia;United States" + }, + { + "id": "2022.emnlp-main.601", + "title": "Decoding a Neural Retriever\u2019s Latent Space for Query Suggestion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Neural retrieval models have superseded classic bag-of-words methods such as BM25 as the retrieval framework of choice. However, neural systems lack the interpretability of bag-of-words models; it is not trivial to connect a query change to a change in the latent space that ultimately determines the retrieval results. To shed light on this embedding space, we learn a \u201cquery decoder\u201d that, given a latent representation of a neural search engine, generates the corresponding query. We show that it is possible to decode a meaningful query from its latent representation and, when moving in the right direction in latent space, to decode a query that retrieves the relevant paragraph. In particular, the query decoder can be useful to understand \u201cwhat should have been asked\u201d to retrieve a particular paragraph from the collection. We employ the query decoder to generate a large synthetic dataset of query reformulations for MSMarco, leading to improved retrieval performance. 
On this data, we train a pseudo-relevance feedback (PRF) T5 model for the application of query suggestion that outperforms both query reformulation and PRF information retrieval baselines.", + "author": "Leonard Adolphs; Michelle Chen Huebscher; Christian Buck; Sertan Girgin; Olivier Bachem; Massimiliano Ciaramita; Thomas Hofmann", + "authorids": "/l/leonard-adolphs/; /m/michelle-chen-huebscher/; /c/christian-buck/; /s/sertan-girgin/; /o/olivier-bachem/; /m/massimiliano-ciaramita/; /t/thomas-hofmann/", + "bibtex": "@inproceedings{adolphs-etal-2022-decoding,\n title = \"Decoding a Neural Retriever`s Latent Space for Query Suggestion\",\n author = \"Adolphs, Leonard and\n Chen Huebscher, Michelle and\n Buck, Christian and\n Girgin, Sertan and\n Bachem, Olivier and\n Ciaramita, Massimiliano and\n Hofmann, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.601/\",\n doi = \"10.18653/v1/2022.emnlp-main.601\",\n pages = \"8786--8804\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.601.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.601/", + "pdf_size": 623124, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4024313620497610429&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "ETH Z\u00fcrich; Google Research; Google Research; Google Research; Google Research; Google Research; ETH Z\u00fcrich", + "aff_domain": "inf.ethz.ch; ; ; ; ; ; ", + "email": "inf.ethz.ch; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;1;1;0", + "aff_unique_norm": "ETH Z\u00fcrich;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": 
"https://www.ethz.ch;https://research.google", + "aff_unique_abbr": "ETHZ;Google Research", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;1;1;1;1;1;0", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.emnlp-main.74", + "title": "Deconfounding Legal Judgment Prediction for European Court of Human Rights Cases Towards Better Alignment with Experts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This work demonstrates that Legal Judgement Prediction systems without expert-informed adjustments can be vulnerable to shallow, distracting surface signals that arise from corpus construction, case distribution, and confounding factors. To mitigate this, we use domain expertise to strategically identify statistically predictive but legally irrelevant information. We adopt adversarial training to prevent the system from relying on it. We evaluate our deconfounded models by employing interpretability techniques and comparing to expert annotations. Quantitative experiments and qualitative analysis show that our deconfounded model consistently aligns better with expert rationales than baselines trained for prediction only. 
We further contribute a set of reference expert annotations to the validation and testing partitions of an existing benchmark dataset of European Court of Human Rights cases.", + "author": "Santosh T.y.s.s; Shanshan Xu; Oana Ichim; Matthias Grabmair", + "authorids": "/s/santosh-t-y-s-s/; /s/shanshan-xu/; /o/oana-ichim/; /m/matthias-grabmair/", + "bibtex": "@inproceedings{santosh-etal-2022-deconfounding,\n title = \"Deconfounding Legal Judgment Prediction for {E}uropean Court of Human Rights Cases Towards Better Alignment with Experts\",\n author = \"T.y.s.s, Santosh and\n Xu, Shanshan and\n Ichim, Oana and\n Grabmair, Matthias\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.74/\",\n doi = \"10.18653/v1/2022.emnlp-main.74\",\n pages = \"1120--1138\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.74.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.74/", + "pdf_size": 1132471, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11229180296888363009&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-industry.53", + "title": "Dense Feature Memory Augmented Transformers for COVID-19 Vaccination Search Classification", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "With the devastating outbreak of COVID-19, vaccines are one of the crucial lines of defense against mass infection in this global pandemic. Given the protection they provide, vaccines are becoming mandatory in certain social and professional settings. 
This paper presents a classification model for detecting COVID-19 vaccination related search queries, a machine learning model that is used to generate search insights for COVID-19 vaccinations. The proposed method combines and leverages advancements from modern state-of-the-art (SOTA) natural language understanding (NLU) techniques such as pretrained Transformers with traditional dense features. We propose a novel approach of considering dense features as memory tokens that the model can attend to. We show that this new modeling approach enables a significant improvement to the Vaccine Search Insights (VSI) task, improving a strong well-established gradient-boosting baseline by relative +15% improvement in F1 score and +14% in precision.", + "author": "Jai Gupta; Yi Tay; Chaitanya Kamath; Vinh Tran; Donald Metzler; Shailesh Bavadekar; Mimi Sun; Evgeniy Gabrilovich", + "authorids": "/j/jai-gupta/; /y/yi-tay/; /c/chaitanya-kamath/; /v/vinh-tran/; /d/donald-metzler/; /s/shailesh-bavadekar/; /m/mimi-sun/; /e/evgeniy-gabrilovich/", + "bibtex": "@inproceedings{gupta-etal-2022-dense,\n title = \"Dense Feature Memory Augmented Transformers for {COVID}-19 Vaccination Search Classification\",\n author = \"Gupta, Jai and\n Tay, Yi and\n Kamath, Chaitanya and\n Tran, Vinh and\n Metzler, Donald and\n Bavadekar, Shailesh and\n Sun, Mimi and\n Gabrilovich, Evgeniy\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.53/\",\n doi = \"10.18653/v1/2022.emnlp-industry.53\",\n pages = \"521--530\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.53.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.53/", + "pdf_size": 1141212, + "gs_citation": 0, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?q=related:5jcD9drlBNQJ:scholar.google.com/&scioq=Dense+Feature+Memory+Augmented+Transformers+for+COVID-19+Vaccination+Search+Classification&hl=en&as_sdt=0,5", + "gs_version_total": 5, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com;google.com;acm.org", + "email": "google.com;google.com;google.com;google.com;google.com;google.com;google.com;acm.org", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.543", + "title": "Dependency Parsing via Sequence Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Dependency parsing aims to extract syntactic dependency structure or semantic dependency structure for sentences.Existing methods for dependency parsing include transition-based method, graph-based method and sequence-to-sequence method.These methods obtain excellent performance and we notice them belong to labeling method.Therefore, it may be very valuable and interesting to explore the possibility of using generative method to implement dependency parsing.In this paper, we propose to achieve Dependency Parsing (DP) via Sequence Generation (SG) by utilizing only the pre-trained language model without any auxiliary structures.We first explore different serialization designing strategies for converting parsing structures into sequences.Then we design dependency units and concatenate these units 
into the sequence for DPSG.We verify the DPSG is capable of parsing on widely used DP benchmarks, i.e., PTB, UD2.2, SDP15 and SemEval16.In addition, we also investigate the astonishing low-resource applicability of DPSG, which includes unsupervised cross-domain conducted on CODT and few-shot cross-task conducted on SDP15.Our research demonstrates that sequence generation is one of the effective methods to achieve dependency parsing.Our codes are available now.", + "author": "Boda Lin; Zijun Yao; Jiaxin Shi; Shulin Cao; Binghao Tang; Si Li; Yong Luo; Juanzi Li; Lei Hou", + "authorids": "/b/boda-lin/; /z/zijun-yao/; /j/jiaxin-shi/; /s/shulin-cao/; /b/binghao-tang/; /s/si-li/; /y/yong-luo/; /j/juanzi-li/; /l/lei-hou/", + "bibtex": "@inproceedings{lin-etal-2022-dependency,\n title = \"Dependency Parsing via Sequence Generation\",\n author = \"Lin, Boda and\n Yao, Zijun and\n Shi, Jiaxin and\n Cao, Shulin and\n Tang, Binghao and\n Li, Si and\n Luo, Yong and\n Li, Juanzi and\n Hou, Lei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.543/\",\n doi = \"10.18653/v1/2022.findings-emnlp.543\",\n pages = \"7339--7353\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.543.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.543/", + "pdf_size": 1538681, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10053032793038452708&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "https://github.com/TimeLessLing/DPSG-code/tree/main", + "project": "", + "author_num": 9 + }, + { + "id": "2022.emnlp-industry.55", + "title": "Deploying 
Unified BERT Moderation Model for E-Commerce Reviews", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Moderation of user-generated e-commerce content has become crucial due to the large and diverse user base on the platforms. Product reviews and ratings have become an integral part of the shopping experience to build trust among users. Due to the high volume of reviews generated on a vast catalog of products, manual moderation is infeasible, making machine moderation a necessity. In this work, we described our deployed system and models for automated moderation of user-generated content. At the heart of our approach, we outline several rejection reasons for review & rating moderation and explore a unified BERT model to moderate them. We convey the importance of product vertical embeddings for the relevancy of the review for a given product and highlight the advantages of pre-training the BERT models with monolingual data to cope with the domain gap in the absence of huge labelled datasets. We observe a 4.78% F1 increase with less labelled data and a 2.57% increase in F1 score on the review data compared to the publicly available BERT-based models. 
Our best model In-House-BERT-vertical sends only 5.89% of total reviews to manual moderation and has been deployed in production serving live traffic for millions of users.", + "author": "Ravindra Nayak; Nikesh Garera", + "authorids": "/r/ravindra-nayak/; /n/nikesh-garera/", + "bibtex": "@inproceedings{nayak-garera-2022-deploying,\n title = \"Deploying Unified {BERT} Moderation Model for {E}-Commerce Reviews\",\n author = \"Nayak, Ravindra and\n Garera, Nikesh\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.55/\",\n doi = \"10.18653/v1/2022.emnlp-industry.55\",\n pages = \"540--547\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.55.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.55/", + "pdf_size": 238881, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:6IqtUkAmuSAJ:scholar.google.com/&scioq=Deploying+Unified+BERT+Moderation+Model+for+E-Commerce+Reviews&hl=en&as_sdt=0,33", + "gs_version_total": 0, + "aff": "Flipkart; Flipkart", + "aff_domain": "flipkart.com;flipkart.com", + "email": "flipkart.com;flipkart.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Flipkart", + "aff_unique_dep": "", + "aff_unique_url": "https://www.flipkart.com", + "aff_unique_abbr": "Flipkart", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-industry.17", + "title": "Deploying a Retrieval based Response Model for Task Oriented Dialogues", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Task-oriented dialogue systems in 
industry settings need to have high conversational capability, be easily adaptable to changing situations and conform to business constraints. This paper describes a 3-step procedure to develop a conversational model that satisfies these criteria and can efficiently scale to rank a large set of response candidates. First, we provide a simple algorithm to semi-automatically create a high-coverage template set from historic conversations without any annotation. Second, we propose a neural architecture that encodes the dialogue context and applicable business constraints as profile features for ranking the next turn. Third, we describe a two-stage learning strategy with self-supervised training, followed by supervised fine-tuning on limited data collected through a human-in-the-loop platform. Finally, we describe offline experiments and present results of deploying our model with human-in-the-loop to converse with live customers online.", + "author": "Lahari Poddar; Gy\u00f6rgy Szarvas; Cheng Wang; Jorge Balazs; Pavel Danchenko; Patrick Ernst", + "authorids": "/l/lahari-poddar/; /g/gyorgy-szarvas/; /c/cheng-wang/; /j/jorge-balazs/; /p/pavel-danchenko/; /p/patrick-ernst/", + "bibtex": "@inproceedings{poddar-etal-2022-deploying,\n title = \"Deploying a Retrieval based Response Model for Task Oriented Dialogues\",\n author = {Poddar, Lahari and\n Szarvas, Gy{\\\"o}rgy and\n Wang, Cheng and\n Balazs, Jorge and\n Danchenko, Pavel and\n Ernst, Patrick},\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.17/\",\n doi = \"10.18653/v1/2022.emnlp-industry.17\",\n pages = \"169--178\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.17.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-industry.17/", + "pdf_size": 685441, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:nKPu1tZdyv4J:scholar.google.com/&scioq=Deploying+a+Retrieval+based+Response+Model+for+Task+Oriented+Dialogues&hl=en&as_sdt=0,5", + "gs_version_total": 7, + "aff": "Amazon; Amazon; Amazon; Amazon; Amazon; Amazon", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.279", + "title": "Describing Sets of Images with Textual-PCA", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We seek to semantically describe a set of images, capturing both the attributes of single images and the variations within the set. Our procedure is analogous to Principle Component Analysis, in which the role of projection vectors is replaced with generated phrases. First, a centroid phrase that has the largest average semantic similarity to the images in the set is generated, where both the computation of the similarity and the generation are based on pretrained vision-language models. Then, the phrase that generates the highest variation among the similarity scores is generated, using the same models. The next phrase maximizes the variance subject to being orthogonal, in the latent space, to the highest-variance phrase, and the process continues. 
Our experiments show that our method is able to convincingly capture the essence of image sets and describe the individual elements in a semantically meaningful way within the context of the entire set. Our code is available at: https://github.com/OdedH/textual-pca.", + "author": "Oded Hupert; Idan Schwartz; Lior Wolf", + "authorids": "/o/oded-hupert/; /i/idan-schwartz/; /l/lior-wolf/", + "bibtex": "@inproceedings{hupert-etal-2022-describing,\n title = \"Describing Sets of Images with Textual-{PCA}\",\n author = \"Hupert, Oded and\n Schwartz, Idan and\n Wolf, Lior\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.279/\",\n doi = \"10.18653/v1/2022.findings-emnlp.279\",\n pages = \"3811--3821\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.279.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.279/", + "pdf_size": 8035929, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2243453455964624558&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.57", + "title": "Detect-Localize-Repair: A Unified Framework for Learning to Debug with CodeT5", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automated software debugging is a crucial task for improving the productivity of software developers. Many neural-based techniques have been proven effective for debugging-related tasks such as bug localization and program repair (or bug fixing). 
However, these techniques often focus only on either one of them or approach them in a stage-wise manner, ignoring the mutual benefits between them. In this work, we propose a novel unified Detect-Localize-Repair framework based on a pretrained programming language model CodeT5 to seamlessly address these tasks, named CodeT5-DLR. Specifically, we propose three objectives to adapt the generic CodeT5 for debugging: a bug detection objective to determine whether a given code snippet is buggy or not, a bug localization objective to identify the buggy lines, and a program repair objective to translate the buggy code to its fixed version. We evaluate it on each of these tasks and their combined setting on two newly collected line-level debugging datasets in Java and Python. Extensive results show that our model significantly outperforms existing baselines from both NLP and software engineering domains.", + "author": "Nghi Bui; Yue Wang; Steven C.H. Hoi", + "authorids": "/n/nghi-bui/; /y/yue-wang/; /s/steven-c-h-hoi/", + "bibtex": "@inproceedings{bui-etal-2022-detect,\n title = \"Detect-Localize-Repair: A Unified Framework for Learning to Debug with {C}ode{T}5\",\n author = \"Bui, Nghi and\n Wang, Yue and\n Hoi, Steven C.H.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.57/\",\n doi = \"10.18653/v1/2022.findings-emnlp.57\",\n pages = \"812--823\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.57.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.57/", + "pdf_size": 1277661, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11304695818889214261&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + 
"aff": "Salesforce Research Asia; Salesforce Research Asia; Salesforce Research Asia", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Salesforce Research", + "aff_unique_dep": "Research", + "aff_unique_url": "https://research.salesforce.com", + "aff_unique_abbr": "Salesforce Research Asia", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Asia", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.findings-emnlp.386", + "title": "Detecting Dementia from Long Neuropsychological Interviews", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neuropsychological exams are commonly used to diagnose various kinds of cognitive impairment. They typically involve a trained examiner who conducts a series of cognitive tests with a subject. In recent years, there has been growing interest in developing machine learning methods to extract speech and language biomarkers from exam recordings to provide automated input for cognitive assessment. Inspired by recent findings suggesting that the examiner\u2019s language can influence cognitive impairment classifications, in this paper, we study the influence of the examiner on automatic dementia identification decisions in real-world neuropsychological exams. To mitigate the influence of the examiner, we propose a systematic three-stage pipeline for detecting dementia from exam recordings. In the first stage, we perform audio-based speaker diarization (i.e., estimating who spoke when?) by incorporating speaker discriminative features. In the second stage, we employ text-based language models to identify the role of the speaker (i.e., examiner or subject). 
Finally, in the third stage, we employ text- and audio-based models to detect cognitive impairment from hypothesized subject segments. Our studies suggest that incorporating audio-based diarization followed by text-based role identification helps mitigate the influences from the examiner\u2019s segments. Further, we found that the text and audio modalities complement each other, and the performance improves when we use both modalities. We also perform several carefully designed experimental studies to assess the performance of each stage.", + "author": "Nauman Dawalatabad; Yuan Gong; Sameer Khurana; Rhoda Au; James Glass", + "authorids": "/n/nauman-dawalatabad/; /y/yuan-gong/; /s/sameer-khurana/; /r/rhoda-au/; /j/james-glass/", + "bibtex": "@inproceedings{dawalatabad-etal-2022-detecting,\n title = \"Detecting Dementia from Long Neuropsychological Interviews\",\n author = \"Dawalatabad, Nauman and\n Gong, Yuan and\n Khurana, Sameer and\n Au, Rhoda and\n Glass, James\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.386/\",\n doi = \"10.18653/v1/2022.findings-emnlp.386\",\n pages = \"5270--5283\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.386.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.386/", + "pdf_size": 1460852, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13621063363014345768&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 9, + "aff": "MIT CSAIL; MIT CSAIL; MIT CSAIL; Boston University; MIT CSAIL", + "aff_domain": "mit.edu;mit.edu;mit.edu;bu.edu;mit.edu", + "email": "mit.edu;mit.edu;mit.edu;bu.edu;mit.edu", + "github": "", + "project": "", + "author_num": 5, + 
"aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Boston University", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", + "aff_unique_url": "https://www.csail.mit.edu;https://www.bu.edu", + "aff_unique_abbr": "MIT CSAIL;BU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.618", + "title": "Detecting Label Errors by Using Pre-Trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We show that large pre-trained language models are inherently highly capable of identifying label errors in natural language datasets: simply examining out-of-sample data points in descending order of fine-tuned task loss significantly outperforms more complex error-detection mechanisms proposed in previous work. To this end, we contribute a novel method for introducing realistic, human-originated label noise into existing crowdsourced datasets such as SNLI and TweetNLP. We show that this noise has similar properties to real, hand-verified label errors, and is harder to detect than existing synthetic noise, creating challenges for model robustness.We argue that human-originated noise is a better standard for evaluation than synthetic noise. 
Finally, we use crowdsourced verification to evaluate the detection of real errors on IMDB, Amazon Reviews, and Recon, and confirm that pre-trained models perform at a 9\u201336% higher absolute Area Under the Precision-Recall Curve than existing models.", + "author": "Derek Chong; Jenny Hong; Christopher Manning", + "authorids": "/d/derek-chong/; /j/jenny-hong/; /c/christopher-d-manning/", + "bibtex": "@inproceedings{chong-etal-2022-detecting,\n title = \"Detecting Label Errors by Using Pre-Trained Language Models\",\n author = \"Chong, Derek and\n Hong, Jenny and\n Manning, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.618/\",\n doi = \"10.18653/v1/2022.emnlp-main.618\",\n pages = \"9074--9091\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.618.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.618/", + "pdf_size": 668344, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12416031320745915559&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Stanford University; Stanford University; Stanford University", + "aff_domain": "stanford.edu;cs.stanford.edu;cs.stanford.edu", + "email": "stanford.edu;cs.stanford.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.393", + "title": 
"Detecting Languages Unintelligible to Multilingual Models through Local Structure Probes", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Providing better language tools for low-resource and endangered languages is imperative for equitable growth.Recent progress with massively multilingual pretrained models has proven surprisingly effective at performing zero-shot transfer to a wide variety of languages.However, this transfer is not universal, with many languages not currently understood by multilingual approaches.It is estimated that only 72 languages possess a \u201csmall set of labeled datasets\u201d on which we could test a model\u2019s performance, the vast majority of languages not having the resources available to simply evaluate performances on.In this work, we attempt to clarify which languages do and do not currently benefit from such transfer.To that end, we develop a general approach that requires only unlabelled text to detect which languages are not well understood by a cross-lingual model.Our approach is derived from the hypothesis that if a model\u2019s understanding is insensitive to perturbations to text in a language, it is likely to have a limited understanding of that language.We construct a cross-lingual sentence similarity task to evaluate our approach empirically on 350, primarily low-resource, languages.", + "author": "Louis Clouatre; Prasanna Parthasarathi; Amal Zouaq; Sarath Chandar", + "authorids": "/l/louis-clouatre/; /p/prasanna-parthasarathi/; /a/amal-zouaq/; /s/sarath-chandar/", + "bibtex": "@inproceedings{clouatre-etal-2022-detecting,\n title = \"Detecting Languages Unintelligible to Multilingual Models through Local Structure Probes\",\n author = \"Clouatre, Louis and\n Parthasarathi, Prasanna and\n Zouaq, Amal and\n Chandar, Sarath\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month 
= dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.393/\",\n doi = \"10.18653/v1/2022.findings-emnlp.393\",\n pages = \"5375--5396\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.393.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.393/", + "pdf_size": 924351, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:XIaxZKz-fboJ:scholar.google.com/&scioq=Detecting+Languages+Unintelligible+to+Multilingual+Models+through+Local+Structure+Probes&hl=en&as_sdt=0,33", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-industry.39", + "title": "Developing Prefix-Tuning Models for Hierarchical Text Classification", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Hierarchical text classification (HTC) is a key problem and task in many industrial applications, which aims to predict labels organized in a hierarchy for given input text. For example, HTC can group the descriptions of online products into a taxonomy or organizing customer reviews into a hierarchy of categories. In real-life applications, while Pre-trained Language Models (PLMs) have dominated many NLP tasks, they face significant challenges too\u2014the conventional fine-tuning process needs to modify and save models with a huge number of parameters. This is becoming more critical for HTC in both global and local modelling\u2014the latter needs to learn multiple classifiers at different levels/nodes in a hierarchy. The concern will be even more serious since PLM sizes are continuing to increase in order to attain more competitive performances. Most recently, prefix tuning has become a very attractive technology by only tuning and saving a tiny set of parameters. 
Exploring prefix turning for HTC is hence highly desirable and has timely impact. In this paper, we investigate prefix tuning on HTC in two typical setups: local and global HTC. Our experiment shows that the prefix-tuning model only needs less than 1% of parameters and can achieve performance comparable to regular full fine-tuning. We demonstrate that using contrastive learning in learning prefix vectors can further improve HTC performance.", + "author": "Lei Chen; Houwei Chou; Xiaodan Zhu", + "authorids": "/l/lei-chen/; /h/houwei-chou/; /x/xiaodan-zhu/", + "bibtex": "@inproceedings{chen-etal-2022-developing,\n title = \"Developing Prefix-Tuning Models for Hierarchical Text Classification\",\n author = \"Chen, Lei and\n Chou, Houwei and\n Zhu, Xiaodan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.39/\",\n doi = \"10.18653/v1/2022.emnlp-industry.39\",\n pages = \"390--397\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.39.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.39/", + "pdf_size": 361217, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3064151328677193943&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Rakuten Institute of Technology (RIT); Rakuten Institute of Technology (RIT); Ingenuity Labs Research Institute & ECE, Queen\u2019s University, Canada", + "aff_domain": "rakuten.com;rakuten.com;queensu.ca", + "email": "rakuten.com;rakuten.com;queensu.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Rakuten Institute of Technology;Queen\u2019s University", + "aff_unique_dep": ";Ingenuity Labs Research Institute & 
ECE", + "aff_unique_url": "https://rit.rakuten.com;https://www.queensu.ca", + "aff_unique_abbr": "RIT;Queen's U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Japan;Canada" + }, + { + "id": "2022.emnlp-main.490", + "title": "Dial2vec: Self-Guided Contrastive Learning of Unsupervised Dialogue Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we introduce the task of learning unsupervised dialogue embeddings.Trivial approaches such as combining pre-trained word or sentence embeddings and encoding through pre-trained language models (PLMs) have been shown to be feasible for this task.However, these approaches typically ignore the conversational interactions between interlocutors, resulting in poor performance.To address this issue, we proposed a self-guided contrastive learning approach named dial2vec.Dial2vec considers a dialogue as an information exchange process.It captures the interaction patterns between interlocutors and leverages them to guide the learning of the embeddings corresponding to each interlocutor.Then the dialogue embedding is obtained by an aggregation of the embeddings from all interlocutors.To verify our approach, we establish a comprehensive benchmark consisting of six widely-used dialogue datasets.We consider three evaluation tasks: domain categorization, semantic relatedness, and dialogue retrieval.Dial2vec achieves on average 8.7, 9.0, and 13.8 points absolute improvements in terms of purity, Spearman\u2019s correlation, and mean average precision (MAP) over the strongest baseline on the three tasks respectively.Further analysis shows that dial2vec obtains informative and discriminative embeddings for both interlocutors under the guidance of the conversational interactions and achieves the best performance when aggregating them through the interlocutor-level pooling strategy.All codes and data are publicly available 
at https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/dial2vec.", + "author": "Che Liu; Rui Wang; Junfeng Jiang; Yongbin Li; Fei Huang", + "authorids": "/c/che-liu/; /r/rui-wang/; /j/junfeng-jiang/; /y/yongbin-li/; /f/fei-huang/", + "bibtex": "@inproceedings{liu-etal-2022-dial2vec,\n title = \"Dial2vec: Self-Guided Contrastive Learning of Unsupervised Dialogue Embeddings\",\n author = \"Liu, Che and\n Wang, Rui and\n Jiang, Junfeng and\n Li, Yongbin and\n Huang, Fei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.490/\",\n doi = \"10.18653/v1/2022.emnlp-main.490\",\n pages = \"7272--7282\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.490.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.490/", + "pdf_size": 500814, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14577716072194982309&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/dial2vec", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "DAMO Academy", + "aff_unique_url": "https://www.alibaba-group.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.828", + "title": "DialogConv: A Lightweight Fully Convolutional Network for Multi-view Response Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current end-to-end retrieval-based dialogue systems are mainly based on Recurrent Neural Networks or Transformers with attention mechanisms. Although promising results have been achieved, these models often suffer from slow inference or huge number of parameters. In this paper, we propose a novel lightweight fully convolutional architecture, called DialogConv, for response selection. DialogConv is exclusively built on top of convolution to extract matching features of context and response. Dialogues are modeled in 3D views, where DialogConv performs convolution operations on embedding view, word view and utterance view to capture richer semantic information from multiple contextual views. On the four benchmark datasets, compared with state-of-the-art baselines, DialogConv is on average about 8.5x smaller in size, and 79.39x and 10.64x faster on CPU and GPU devices, respectively. 
At the same time, DialogConv achieves the competitive effectiveness of response selection.", + "author": "Yongkang Liu; Shi Feng; Wei Gao; Daling Wang; Yifei Zhang", + "authorids": "/y/yongkang-liu/; /s/shi-feng/; /w/wei-gao/; /d/daling-wang/; /y/yifei-zhang/", + "bibtex": "@inproceedings{liu-etal-2022-dialogconv,\n title = \"{D}ialog{C}onv: A Lightweight Fully Convolutional Network for Multi-view Response Selection\",\n author = \"Liu, Yongkang and\n Feng, Shi and\n Gao, Wei and\n Wang, Daling and\n Zhang, Yifei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.828/\",\n doi = \"10.18653/v1/2022.emnlp-main.828\",\n pages = \"12086--12098\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.828.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.828/", + "pdf_size": 1218849, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3659862255663645594&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Northeastern University, China; Northeastern University, China; Singapore Management University, Singapore; Northeastern University, China; Northeastern University, China", + "aff_domain": "163.com;cse.neu.edu.cn;smu.edu.sg;cse.neu.edu.cn;cse.neu.edu.cn", + "email": "163.com;cse.neu.edu.cn;smu.edu.sg;cse.neu.edu.cn;cse.neu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Northeastern University;Singapore Management University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.smu.edu.sg", + "aff_unique_abbr": "NEU;SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.findings-emnlp.234", + "title": "DialogUSR: Complex Dialogue Utterance Splitting and Reformulation for Multiple Intent Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While interacting with chatbots, users may elicit multiple intents in a single dialogue utterance. Instead of training a dedicated multi-intent detection model, we propose DialogUSR, a dialogue utterance splitting and reformulation task that first splits multi-intent user query into several single-intent sub-queries and then recovers all the coreferred and omitted information in the sub-queries. DialogUSR can serve as a plug-in and domain-agnostic module that empowers the multi-intent detection for the deployed chatbots with minimal efforts. We collect a high-quality naturally occurring dataset that covers 23 domains with a multi-step crowd-souring procedure. To benchmark the proposed dataset, we propose multiple action-based generative models that involve end-to-end and two-stage training, and conduct in-depth analyses on the pros and cons of the proposed baselines.", + "author": "Haoran Meng; Zheng Xin; Tianyu Liu; Zizhen Wang; He Feng; Binghuai Lin; Xuemin Zhao; Yunbo Cao; Zhifang Sui", + "authorids": "/h/haoran-meng/; /z/zheng-xin/; /t/tianyu-liu/; /z/zizhen-wang/; /h/he-feng/; /b/binghuai-lin/; /x/xuemin-zhao/; /y/yunbo-cao/; /z/zhifang-sui/", + "bibtex": "@inproceedings{meng-etal-2022-dialogusr,\n title = \"{D}ialog{USR}: Complex Dialogue Utterance Splitting and Reformulation for Multiple Intent Detection\",\n author = \"Meng, Haoran and\n Xin, Zheng and\n Liu, Tianyu and\n Wang, Zizhen and\n Feng, He and\n Lin, Binghuai and\n Zhao, Xuemin and\n Cao, Yunbo and\n Sui, Zhifang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n 
year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.234/\",\n doi = \"10.18653/v1/2022.findings-emnlp.234\",\n pages = \"3214--3229\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.234.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.234/", + "pdf_size": 1079772, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4851429100096957242&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, China; Institute of Software, Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; Tencent Cloud Xiaowei + University of Chinese Academy of Sciences, China; MOE Key Laboratory of Computational Linguistics, Peking University, China", + "aff_domain": "stu.pku.edu.cn;iscas.ac.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;pku.edu.cn", + "email": "stu.pku.edu.cn;iscas.ac.cn;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;pku.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2+3;2+3;2+3;2+3;2+3;2+3;0", + "aff_unique_norm": "Peking University;Chinese Academy of Sciences;Tencent;University of Chinese Academy of Sciences", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics;Institute of Software;Tencent Cloud Xiaowei;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.ios.ac.cn;https://cloud.tencent.com;http://www.ucas.ac.cn", + "aff_unique_abbr": "PKU;CAS;Tencent;UCAS", 
+ "aff_campus_unique_index": ";;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0+0;0+0;0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.17", + "title": "Dialogue Meaning Representation for Task-Oriented Dialogue Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Dialogue meaning representation formulates natural language utterance semantics in their conversational context in an explicit and machine-readable form. Previous work typically follows the intent-slot framework, which is easy for annotation yet limited in scalability for complex linguistic expressions. A line of works alleviates the representation issue by introducing hierarchical structures but challenging to express complex compositional semantics, such as negation and coreference. We propose Dialogue Meaning Representation (DMR), a pliable and easily extendable representation for task-oriented dialogue. Our representation contains a set of nodes and edges to represent rich compositional semantics. Moreover, we propose an inheritance hierarchy mechanism focusing on domain extensibility. Additionally, we annotated DMR-FastFood, a multi-turn dialogue dataset with more than 70k utterances, with DMR. We propose two evaluation tasks to evaluate different dialogue models and a novel coreference resolution model GNNCoref for the graph-based coreference resolution task. 
Experiments show that DMR can be parsed well with pre-trained Seq2Seq models, and GNNCoref outperforms the baseline models by a large margin.The dataset and code are available at https://github.com/amazon-research/dialogue-meaning-representation", + "author": "Xiangkun Hu; Junqi Dai; Hang Yan; Yi Zhang; Qipeng Guo; Xipeng Qiu; Zheng Zhang", + "authorids": "/x/xiangkun-hu/; /j/junqi-dai/; /h/hang-yan/; /y/yi-zhang/; /q/qipeng-guo/; /x/xipeng-qiu/; /z/zheng-zhang/", + "bibtex": "@inproceedings{hu-etal-2022-dialogue,\n title = \"Dialogue Meaning Representation for Task-Oriented Dialogue Systems\",\n author = \"Hu, Xiangkun and\n Dai, Junqi and\n Yan, Hang and\n Zhang, Yi and\n Guo, Qipeng and\n Qiu, Xipeng and\n Zhang, Zheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.17/\",\n doi = \"10.18653/v1/2022.findings-emnlp.17\",\n pages = \"223--237\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.17.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.17/", + "pdf_size": 966549, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11806394093576069377&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Amazon AWS AI; School of Computer Science, Fudan University + Amazon Shanghai AI Lab; School of Computer Science, Fudan University; Amazon AWS AI; Amazon AWS AI; School of Computer Science, Fudan University; Amazon AWS AI", + "aff_domain": "amazon.com;m.fudan.edu.cn;fudan.edu.cn;amazon.com;amazon.com;fudan.edu.cn;amazon.com", + "email": "amazon.com;m.fudan.edu.cn;fudan.edu.cn;amazon.com;amazon.com;fudan.edu.cn;amazon.com", + "github": 
"https://github.com/amazon-research/dialogue-meaning-representation", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1+0;1;0;0;1;0", + "aff_unique_norm": "Amazon;Fudan University", + "aff_unique_dep": "Amazon Web Services AI;School of Computer Science", + "aff_unique_url": "https://aws.amazon.com;https://www.fudan.edu.cn", + "aff_unique_abbr": "AWS;Fudan", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;1+1;1;0;0;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.117", + "title": "DialogueGAT: A Graph Attention Network for Financial Risk Prediction by Modeling the Dialogues in Earnings Conference Calls", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Financial risk prediction is an essential task for risk management in capital markets. While traditional prediction models are built based on the hard information of numerical data, recent studies have shown that the soft information of verbal cues in earnings conference calls is significant for predicting market risk due to its less constrained fashion and direct interaction between managers and analysts. However, most existing models mainly focus on extracting useful semantic information from the textual conference call transcripts but ignore their subtle yet important information of dialogue structures. To bridge this gap, we develop a graph attention network called DialogueGAT for financial risk prediction by simultaneously modeling the speakers and their utterances in dialogues in conference calls. Different from previous studies, we propose a new method for constructing the graph of speakers and utterances in a dialogue, and design contextual attention at both speaker and utterance levels for disentangling their effects on the downstream prediction task. 
For model evaluation, we extend an existing dataset of conference call transcripts by adding the dialogue structure and speaker information. Empirical results on our dataset of S&P1500 companies demonstrate the superiority of our proposed model over competitive baselines from the extant literature.", + "author": "Yunxin Sang; Yang Bao", + "authorids": "/y/yunxin-sang/; /y/yang-bao/", + "bibtex": "@inproceedings{sang-bao-2022-dialoguegat,\n title = \"{D}ialogue{GAT}: A Graph Attention Network for Financial Risk Prediction by Modeling the Dialogues in Earnings Conference Calls\",\n author = \"Sang, Yunxin and\n Bao, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.117/\",\n doi = \"10.18653/v1/2022.findings-emnlp.117\",\n pages = \"1623--1633\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.117.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.117/", + "pdf_size": 584689, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14481994444155895191&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Antai College of Economics and Management, Shanghai Jiao Tong University; Antai College of Economics and Management, Shanghai Jiao Tong University", + "aff_domain": "gmail.com;sjtu.edu.cn", + "email": "gmail.com;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Antai College of Economics and Management", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai", + 
"aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.696", + "title": "Dictionary-Assisted Supervised Contrastive Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text analysis in the social sciences often involves using specialized dictionaries to reason with abstract concepts, such as perceptions about the economy or abuse on social media. These dictionaries allow researchers to impart domain knowledge and note subtle usages of words relating to a concept(s) of interest. We introduce the dictionary-assisted supervised contrastive learning (DASCL) objective, allowing researchers to leverage specialized dictionaries when fine-tuning pretrained language models. The text is first keyword simplified: a common, fixed token replaces any word in the corpus that appears in the dictionary(ies) relevant to the concept of interest. During fine-tuning, a supervised contrastive objective draws closer the embeddings of the original and keyword-simplified texts of the same class while pushing further apart the embeddings of different classes. The keyword-simplified texts of the same class are more textually similar than their original text counterparts, which additionally draws the embeddings of the same class closer together. Combining DASCL and cross-entropy improves classification performance metrics in few-shot learning settings and social science applications compared to using cross-entropy alone and alternative contrastive and data augmentation methods.", + "author": "Patrick Y. Wu; Richard Bonneau; Joshua A. Tucker; Jonathan Nagler", + "authorids": "/p/patrick-y-wu/; /r/richard-bonneau/; /j/joshua-a-tucker/; /j/jonathan-nagler/", + "bibtex": "@inproceedings{wu-etal-2022-dictionary,\n title = \"Dictionary-Assisted Supervised Contrastive Learning\",\n author = \"Wu, Patrick Y. and\n Bonneau, Richard and\n Tucker, Joshua A. 
and\n Nagler, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.696/\",\n doi = \"10.18653/v1/2022.emnlp-main.696\",\n pages = \"10217--10235\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.696.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.696/", + "pdf_size": 801569, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7740501456607995284&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Center for Social Media and Politics, New York University + Center for Data Science, New York University + Department of Politics, New York University + Courant Institute of Mathematical Sciences, New York University; Center for Social Media and Politics, New York University + Center for Data Science, New York University + Department of Biology, New York University + Courant Institute of Mathematical Sciences, New York University; Center for Social Media and Politics, New York University + Center for Data Science, New York University + Department of Politics, New York University; Center for Social Media and Politics, New York University + Center for Data Science, New York University + Department of Politics, New York University", + "aff_domain": "nyu.edu;nyu.edu;nyu.edu;nyu.edu", + "email": "nyu.edu;nyu.edu;nyu.edu;nyu.edu", + "github": "https://github.com/SMAPPNYU/DASCL", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+0+0;0+0+0+0;0+0+0;0+0+0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "Center for Social Media and Politics", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": 
"0+0+0+0;0+0+0+0;0+0+0;0+0+0", + "aff_campus_unique": "New York", + "aff_country_unique_index": "0+0+0+0;0+0+0+0;0+0+0;0+0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.110", + "title": "DiffG-RL: Leveraging Difference between Environment State and Common Sense", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Taking into account background knowledge as the context has always been an important part of solving tasks that involve natural language. One representative example of such tasks is text-based games, where players need to make decisions based on both description text previously shown in the game, and their own background knowledge about the language and common sense. In this work, we investigate not simply giving common sense, as can be seen in prior research, but also its effective usage. We assume that a part of the environment states different from common sense should constitute one of the grounds for action selection. We propose a novel agent, DiffG-RL, which constructs a Difference Graph that organizes the environment states and common sense by means of interactive objects with a dedicated graph encoder. DiffG-RL also contains a framework for extracting the appropriate amount and representation of common sense from the source to support the construction of the graph. We validate DiffG-RL in experiments with text-based games that require common sense and show that it outperforms baselines by 17% of scores. 
We will make our code publicly available.", + "author": "Tsunehiko Tanaka; Daiki Kimura; Michiaki Tatsubori", + "authorids": "/t/tsunehiko-tanaka/; /d/daiki-kimura/; /m/michiaki-tatsubori/", + "bibtex": "@inproceedings{tanaka-etal-2022-diffg,\n title = \"{D}iff{G}-{RL}: Leveraging Difference between Environment State and Common Sense\",\n author = \"Tanaka, Tsunehiko and\n Kimura, Daiki and\n Tatsubori, Michiaki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.110/\",\n doi = \"10.18653/v1/2022.findings-emnlp.110\",\n pages = \"1534--1546\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.110.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.110/", + "pdf_size": 334070, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:30G6ERIVwrIJ:scholar.google.com/&scioq=DiffG-RL:+Leveraging+Difference+between+Environment+State+and+Common+Sense&hl=en&as_sdt=0,34", + "gs_version_total": 0, + "aff": "Waseda University; IBM Research; IBM Research", + "aff_domain": "fuji.waseda.jp;jp.ibm.com;jp.ibm.com", + "email": "fuji.waseda.jp;jp.ibm.com;jp.ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Waseda University;IBM", + "aff_unique_dep": ";IBM Research", + "aff_unique_url": "https://www.waseda.jp/top;https://www.ibm.com/research", + "aff_unique_abbr": "Waseda;IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.findings-emnlp.244", + "title": "Different Tunes Played with Equal Skill: Exploring a Unified Optimization Subspace for 
Parameter-Efficient Tuning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Delta tuning (DET, also known as parameter-efficient tuning) is deemed as the new paradigm for using pre-trained language models (PLMs). Up to now, various DETs with distinct design elements have been proposed, achieving performance on par with fine-tuning. However, the mechanisms behind the above success are still under-explored, especially the connections among various DETs. To fathom the mystery, we hypothesize that the adaptations of different DETs could all be reparameterized as low-dimensional optimizations in a unified optimization subspace, which could be found by jointly decomposing independent solutions of different DETs. Then we explore the connections among different DETs by conducting optimization within the subspace. In experiments, we find that, for a certain DET, conducting optimization simply in the subspace could achieve comparable performance to its original space, and the found solution in the subspace could be transferred to another DET and achieve non-trivial performance. We also visualize the performance landscape of the subspace, and find that, there exists a substantial region where different DETs all perform well. Finally, we extend our analysis and show the strong connections between fine-tuning and DETs. 
The codes are publicly available at https://github.com/thunlp/Unified-DeltaTuning.", + "author": "Jing Yi; Weize Chen; Yujia Qin; Yankai Lin; Ning Ding; Xu Han; Zhiyuan Liu; Maosong Sun; Jie Zhou", + "authorids": "/j/jing-yi/; /w/weize-chen/; /y/yujia-qin/; /y/yankai-lin/; /n/ning-ding/; /x/xu-han/; /z/zhiyuan-liu/; /m/maosong-sun/; /j/jie-zhou/", + "bibtex": "@inproceedings{yi-etal-2022-different,\n title = \"Different Tunes Played with Equal Skill: Exploring a Unified Optimization Subspace for Parameter-Efficient Tuning\",\n author = \"Yi, Jing and\n Chen, Weize and\n Qin, Yujia and\n Lin, Yankai and\n Ding, Ning and\n Han, Xu and\n Liu, Zhiyuan and\n Sun, Maosong and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.244/\",\n doi = \"10.18653/v1/2022.findings-emnlp.244\",\n pages = \"3348--3366\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.244.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.244/", + "pdf_size": 1451196, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=338704069074198627&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing + Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing + International 
Innovation Center of Tsinghua University, Shanghai + Quan Cheng Laboratory; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing + International Innovation Center of Tsinghua University, Shanghai + Quan Cheng Laboratory; Pattern Recognition Center, WeChat AI, Tencent Inc.", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ;tsinghua.edu.cn;tsinghua.edu.cn; ", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ;tsinghua.edu.cn;tsinghua.edu.cn; ", + "github": "https://github.com/thunlp/Unified-DeltaTuning", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;1+2;0;0;0+0+3;0+0+3;4", + "aff_unique_norm": "Tsinghua University;Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods;Quan Cheng Laboratory;Tencent Inc.", + "aff_unique_dep": "NLP Group;Gaoling School of Artificial Intelligence;Big Data Management and Analysis;;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.ruc.edu.cn;;;https://www.tencent.com", + "aff_unique_abbr": "THU;RUC;;;Tencent", + "aff_campus_unique_index": "0;0;0;0;0;0;0+2;0+2", + "aff_campus_unique": "Beijing;;Shanghai", + "aff_country_unique_index": "0;0;0;0+0;0;0;0+0;0+0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.emnlp-main.520", + "title": "Differentiable Data Augmentation for Contrastive Sentence Representation Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fine-tuning a pre-trained language model via the contrastive learning framework with a large amount of unlabeled sentences or labeled sentence pairs is a common way to obtain high-quality sentence representations. Although the contrastive learning framework has shown its superiority on sentence representation learning over previous methods, the potential of such a framework is under-explored so far due to the simple method it used to construct positive pairs. 
Motivated by this, we propose a method that makes hard positives from the original training examples. A pivotal ingredient of our approach is the use of prefix that attached to a pre-trained language model, which allows for differentiable data augmentation during contrastive learning. Our method can be summarized in two steps: supervised prefix-tuning followed by joint contrastive fine-tuning with unlabeled or labeled examples. Our experiments confirm the effectiveness of our data augmentation approach. The proposed method yields significant improvements over existing methods under both semi-supervised and supervised settings. Our experiments under a low labeled data setting also show that our method is more label-efficient than the state-of-the-art contrastive learning methods.", + "author": "Tianduo Wang; Wei Lu", + "authorids": "/t/tianduo-wang/; /w/wei-lu/", + "bibtex": "@inproceedings{wang-lu-2022-differentiable,\n title = \"Differentiable Data Augmentation for Contrastive Sentence Representation Learning\",\n author = \"Wang, Tianduo and\n Lu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.520/\",\n doi = \"10.18653/v1/2022.emnlp-main.520\",\n pages = \"7640--7653\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.520.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.520/", + "pdf_size": 566596, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9272886658453890790&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "StatNLP Research Group; Singapore University of Technology and Design", + "aff_domain": "sutd.edu.sg;sutd.edu.sg", + "email": "sutd.edu.sg;sutd.edu.sg", + 
"github": "https://github.com/TianduoWang/DiffAug", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "StatNLP Research Group;Singapore University of Technology and Design", + "aff_unique_dep": "Research Group;", + "aff_unique_url": ";https://www.sutd.edu.sg", + "aff_unique_abbr": ";SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1", + "aff_country_unique": ";Singapore" + }, + { + "id": "2022.emnlp-main.323", + "title": "Differentially Private Language Models for Secure Data Sharing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "To protect the privacy of individuals whose data is being shared, it is of high importance to develop methods allowing researchers and companies to release textual data while providing formal privacy guarantees to its originators. In the field of NLP, substantial efforts have been directed at building mechanisms following the framework of local differential privacy, thereby anonymizing individual text samples before releasing them. In practice, these approaches are often dissatisfying in terms of the quality of their output language due to the strong noise required for local differential privacy. In this paper, we approach the problem at hand using global differential privacy, particularly by training a generative language model in a differentially private manner and consequently sampling data from it. Using natural language prompts and a new prompt-mismatch loss, we are able to create highly accurate and fluent textual datasets taking on specific desired attributes such as sentiment or topic and resembling statistical properties of the training data. We perform thorough experiments indicating that our synthetic datasets do not leak information from our original data and are of high language quality and highly suitable for training models for further analysis on real-world data. 
Notably, we also demonstrate that training classifiers on private synthetic data outperforms directly training classifiers with DP-SGD.", + "author": "Justus Mattern; Zhijing Jin; Benjamin Weggenmann; Bernhard Schoelkopf; Mrinmaya Sachan", + "authorids": "/j/justus-mattern/; /z/zhijing-jin/; /b/benjamin-weggenmann/; /b/bernhard-schoelkopf/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{mattern-etal-2022-differentially,\n title = \"Differentially Private Language Models for Secure Data Sharing\",\n author = \"Mattern, Justus and\n Jin, Zhijing and\n Weggenmann, Benjamin and\n Schoelkopf, Bernhard and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.323/\",\n doi = \"10.18653/v1/2022.emnlp-main.323\",\n pages = \"4860--4873\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.323.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.323/", + "pdf_size": 438125, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6005475842211564367&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "RWTH Aachen; MPI & ETH Z\u00fcrich; SAP Security Research; MPI for Intelligent Systems; ETH Z\u00fcrich", + "aff_domain": "rwth-aachen.de;tue.mpg.de;sap.com;tue.mpg.de;ethz.ch", + "email": "rwth-aachen.de;tue.mpg.de;sap.com;tue.mpg.de;ethz.ch", + "github": "https://github.com/justusmattern/private-datasets-with-llms", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;1", + "aff_unique_norm": "RWTH Aachen University;ETH Z\u00fcrich;SAP;Max Planck Institute for Intelligent Systems", + "aff_unique_dep": ";;Security Research;", + "aff_unique_url": 
"https://www.rwth-aachen.de;https://www.ethz.ch;https://www.sap.com;https://www.mpi-is.mpg.de", + "aff_unique_abbr": "RWTH;ETH;SAP;MPI-IS", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Aachen;Z\u00fcrich;", + "aff_country_unique_index": "0;1;0;0;1", + "aff_country_unique": "Germany;Switzerland" + }, + { + "id": "2022.emnlp-main.827", + "title": "Digging Errors in NMT: Evaluating and Understanding Model Errors from Partial Hypothesis Space", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Solid evaluation of neural machine translation (NMT) is key to its understanding and improvement. Current evaluation of an NMT system is usually built upon a heuristic decoding algorithm (e.g., beam search) and an evaluation metric assessing similarity between the translation and golden reference. However, this system-level evaluation framework is limited by evaluating only one best hypothesis and search errors brought by heuristic decoding algorithms. To better understand NMT models, we propose a novel evaluation protocol, which defines model errors with model\u2019s ranking capability over hypothesis space. To tackle the problem of exponentially large space, we propose two approximation methods, top region evaluation along with an exact top-k decoding algorithm, which finds top-ranked hypotheses in the whole hypothesis space, and Monte Carlo sampling evaluation, which simulates hypothesis space from a broader perspective. To quantify errors, we define our NMT model errors by measuring distance between the hypothesis array ranked by the model and the ideally ranked hypothesis array. After confirming the strong correlation with human judgment, we apply our evaluation to various NMT benchmarks and model architectures. We show that the state-of-the-art Transformer models face serious ranking issues and only perform at the random chance level in the top region. 
We further analyze model errors on architectures with different depths and widths, as well as different data-augmentation techniques, showing how these factors affect model errors. Finally, we connect model errors with the search algorithms and provide interesting findings of beam search inductive bias and correlation with Minimum Bayes Risk (MBR) decoding.", + "author": "Jianhao Yan; Chenming Wu; Fandong Meng; Jie Zhou", + "authorids": "/j/jianhao-yan/; /c/chenming-wu/; /f/fandong-meng/; /j/jie-zhou/", + "bibtex": "@inproceedings{yan-etal-2022-digging,\n title = \"Digging Errors in {NMT}: Evaluating and Understanding Model Errors from Partial Hypothesis Space\",\n author = \"Yan, Jianhao and\n Wu, Chenming and\n Meng, Fandong and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.827/\",\n doi = \"10.18653/v1/2022.emnlp-main.827\",\n pages = \"12067--12085\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.827.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.827/", + "pdf_size": 440478, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8697244646765220511&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "WeChat AI, Tencent, China; Tencent, China; WeChat AI, Tencent, China; WeChat AI, Tencent, China", + "aff_domain": "gmail.com;gmail.com;tencent.com;tencent.com", + "email": "gmail.com;gmail.com;tencent.com;tencent.com", + "github": "https://github.com/ElliottYan/digging_errors_nmt", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "WeChat AI", + "aff_unique_url": "https://www.tencent.com", + 
"aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.25", + "title": "Dim-Krum: Backdoor-Resistant Federated Learning for NLP with Dimension-wise Krum-Based Aggregation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite the potential of federated learning, it is known to be vulnerable to backdoor attacks. Many robust federated aggregation methods are proposed to reduce the potential backdoor risk. However, they are mainly validated in the CV field. In this paper, we find that NLP backdoors are hard to defend against than CV, and we provide a theoretical analysis that the malicious update detection error probabilities are determined by the relative backdoor strengths. NLP attacks tend to have small relative backdoor strengths, which may result in the failure of robust federated aggregation methods for NLP attacks. Inspired by the theoretical results, we can choose some dimensions with higher backdoor strengths to settle this issue. 
We propose a novel federated aggregation algorithm, Dim-Krum, for NLP tasks, and experimental results validate its effectiveness.", + "author": "Zhiyuan Zhang; Qi Su; Xu Sun", + "authorids": "/z/zhiyuan-zhang/; /q/qi-su/; /x/xu-sun/", + "bibtex": "@inproceedings{zhang-etal-2022-dim,\n title = \"Dim-Krum: Backdoor-Resistant Federated Learning for {NLP} with Dimension-wise Krum-Based Aggregation\",\n author = \"Zhang, Zhiyuan and\n Su, Qi and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.25/\",\n doi = \"10.18653/v1/2022.findings-emnlp.25\",\n pages = \"339--354\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.25.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.25/", + "pdf_size": 585224, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4192244361194289017&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University; School of Foreign Languages, Peking University + MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.384", + "title": "Dimension Reduction for Efficient Dense Retrieval via Conditional Autoencoder", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dense retrievers encode queries and documents and map them in an embedding space using pre-trained language models. These embeddings need to be high-dimensional to fit training signals and guarantee the retrieval effectiveness of dense retrievers. However, these high-dimensional embeddings lead to larger index storage and higher retrieval latency. To reduce the embedding dimensions of dense retrieval, this paper proposes a Conditional Autoencoder (ConAE) to compress the high-dimensional embeddings to maintain the same embedding distribution and better recover the ranking features. Our experiments show that ConAE is effective in compressing embeddings by achieving comparable ranking performance with its teacher model and making the retrieval system more efficient. Our further analyses show that ConAE can alleviate the redundancy of the embeddings of dense retrieval with only one linear layer. 
All codes of this work are available at https://github.com/NEUIR/ConAE.", + "author": "Zhenghao Liu; Han Zhang; Chenyan Xiong; Zhiyuan Liu; Yu Gu; Xiaohua Li", + "authorids": "/z/zhenghao-liu/; /h/han-zhang/; /c/chenyan-xiong/; /z/zhiyuan-liu/; /y/yu-gu/; /x/xiaohua-li/", + "bibtex": "@inproceedings{liu-etal-2022-dimension,\n title = \"Dimension Reduction for Efficient Dense Retrieval via Conditional Autoencoder\",\n author = \"Liu, Zhenghao and\n Zhang, Han and\n Xiong, Chenyan and\n Liu, Zhiyuan and\n Gu, Yu and\n Li, Xiaohua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.384/\",\n doi = \"10.18653/v1/2022.emnlp-main.384\",\n pages = \"5692--5698\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.384.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.384/", + "pdf_size": 588038, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11370544933495440884&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, Northeastern University, China; Department of Computer Science and Technology, Northeastern University, China; Microsoft Research, United States; Department of Computer Science and Technology, Institute for AI, Tsinghua University, China + Beijing National Research Center for Information Science and Technology, China; Department of Computer Science and Technology, Northeastern University, China; Department of Computer Science and Technology, Northeastern University, China", + "aff_domain": "mail.neu.edu.cn;stumail.neu.edu.cn;microsoft.com;tsinghua.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "email": 
"mail.neu.edu.cn;stumail.neu.edu.cn;microsoft.com;tsinghua.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "github": "https://github.com/NEUIR/ConAE", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2+3;0;0", + "aff_unique_norm": "Northeastern University;Microsoft Research;Tsinghua University;Beijing National Research Center for Information Science and Technology", + "aff_unique_dep": "Department of Computer Science and Technology;;Department of Computer Science and Technology, Institute for AI;", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.microsoft.com/en-us/research;https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "NEU;MSR;Tsinghua;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0+0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.809", + "title": "Directions for NLP Practices Applied to Online Hate Speech Detection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Addressing hate speech in online spaces has been conceptualized as a classification task that uses Natural Language Processing (NLP) techniques. Through this conceptualization, the hate speech detection task has relied on common conventions and practices from NLP. For instance, inter-annotator agreement is conceptualized as a way to measure dataset quality and certain metrics and benchmarks are used to assure model generalization. However, hate speech is a deeply complex and situated concept that eludes such static and disembodied practices. 
In this position paper, we critically reflect on these methodologies for hate speech detection, we argue that many conventions in NLP are poorly suited for the problem and encourage researchers to develop methods that are more appropriate for the task.", + "author": "Paula Fortuna; Monica Dominguez; Leo Wanner; Zeerak Talat", + "authorids": "/p/paula-fortuna/; /m/monica-dominguez/; /l/leo-wanner/; /z/zeerak-talat/", + "bibtex": "@inproceedings{fortuna-etal-2022-directions,\n title = \"Directions for {NLP} Practices Applied to Online Hate Speech Detection\",\n author = \"Fortuna, Paula and\n Dominguez, Monica and\n Wanner, Leo and\n Talat, Zeerak\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.809/\",\n doi = \"10.18653/v1/2022.emnlp-main.809\",\n pages = \"11794--11805\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.809.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.809/", + "pdf_size": 170548, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1267792522701014788&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "NLP Group, Pompeu Fabra University, Barcelona, Spain; NLP Group, Pompeu Fabra University, Barcelona, Spain; ICREA and Pompeu Fabra University, Barcelona, Spain; Simon Fraser University, Burnaby, Vancouver", + "aff_domain": "upf.edu;upf.edu;upf.edu;sfu.ca", + "email": "upf.edu;upf.edu;upf.edu;sfu.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Pompeu Fabra University;Simon Fraser University", + "aff_unique_dep": "NLP Group;", + "aff_unique_url": "https://www.upf.edu;https://www.sfu.ca", + 
"aff_unique_abbr": "UPF;SFU", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Barcelona;Burnaby", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "Spain;Canada" + }, + { + "id": "2022.emnlp-main.223", + "title": "DisCup: Discriminator Cooperative Unlikelihood Prompt-tuning for Controllable Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt learning with immensely large Casual Language Models (CLMs) has been shown promising for attribute-controllable text generation (CTG). However, vanilla prompt tuning tends to imitate training corpus characteristics beyond the control attributes, resulting in a poor generalization ability. Moreover, it is less able to capture the relationship between different attributes, further limiting the control performance. In this paper, we propose a new CTG approach, namely DisCup, which incorporates the attribute knowledge of discriminator to optimize the control-prompts, steering a frozen CLM to produce attribute-specific texts. Specifically, the frozen CLM model, capable of producing multitudinous texts, is first used to generate the next-token candidates based on the context, so as to ensure the diversity of tokens to be predicted. Then, we leverage an attribute-discriminator to select desired/undesired tokens from those candidates, providing the inter-attribute knowledge. Finally, we bridge the above two traits by an unlikelihood objective for prompt-tuning. 
Extensive experimental results show that DisCup can achieve a new state-of-the-art control performance while maintaining an efficient and high-quality text generation, only relying on around 10 virtual tokens.", + "author": "Hanqing Zhang; Dawei Song", + "authorids": "/h/hanqing-zhang/; /d/dawei-song/", + "bibtex": "@inproceedings{zhang-song-2022-discup,\n title = \"{D}is{C}up: Discriminator Cooperative Unlikelihood Prompt-tuning for Controllable Text Generation\",\n author = \"Zhang, Hanqing and\n Song, Dawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.223/\",\n doi = \"10.18653/v1/2022.emnlp-main.223\",\n pages = \"3392--3406\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.223.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.223/", + "pdf_size": 477306, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13508088634055301553&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Beijing Institute of Technology, Beijing, China; Beijing Institute of Technology, Beijing, China + The Open University, United Kingdom", + "aff_domain": "bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn", + "github": "https://github.com/littlehacker26/disc-cooperative-up-tuning", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "Beijing Institute of Technology;The Open University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.open.ac.uk", + "aff_unique_abbr": "BIT;OU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": 
"China;United Kingdom" + }, + { + "id": "2022.emnlp-main.703", + "title": "DiscoSense: Commonsense Reasoning with Discourse Connectives", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present DiscoSense, a benchmark for commonsense reasoning via understanding a wide variety of discourse connectives. We generate compelling distractors in DiscoSense using Conditional Adversarial Filtering, an extension of Adversarial Filtering that employs conditional generation. We show that state-of-the-art pre-trained language models struggle to perform well on DiscoSense, which makes this dataset ideal for evaluating next-generation commonsense reasoning systems.", + "author": "Prajjwal Bhargava; Vincent Ng", + "authorids": "/p/prajjwal-bhargava/; /v/vincent-ng/", + "bibtex": "@inproceedings{bhargava-ng-2022-discosense,\n title = \"{D}isco{S}ense: Commonsense Reasoning with Discourse Connectives\",\n author = \"Bhargava, Prajjwal and\n Ng, Vincent\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.703/\",\n doi = \"10.18653/v1/2022.emnlp-main.703\",\n pages = \"10295--10310\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.703.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.703/", + "pdf_size": 298329, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15732350727077240988&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Human Language Technology Research Institute, University of Texas at Dallas; Human Language Technology Research Institute, University of Texas at Dallas", + "aff_domain": "protonmail.com;hlt.utdallas.edu", + "email": 
"protonmail.com;hlt.utdallas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Texas at Dallas", + "aff_unique_dep": "Human Language Technology Research Institute", + "aff_unique_url": "https://www.utdallas.edu", + "aff_unique_abbr": "UT Dallas", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Dallas", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.380", + "title": "Discord Questions: A Computational Approach To Diversity Analysis in News Coverage", + "track": "main", + "status": "finding", + "award": false, + "abstract": "There are many potential benefits to news readers accessing diverse sources. Modern news aggregators do the hard work of organizing the news, offering readers a plethora of source options, but choosing which source to read remains challenging.We propose a new framework to assist readers in identifying source differences and gaining an understanding of news coverage diversity.The framework is based on the generation of Discord Questions: questions with a diverse answer pool, explicitly illustrating source differences.To assemble a prototype of the framework, we focus on two components: (1) discord question generation, the task of generating questions answered differently by sources, for which we propose an automatic scoring method, and create a model that improves performance from current question generation (QG) methods by 5%, (2) answer consolidation, the task of grouping answers to a question that are semantically similar, for which we collect data and repurpose a method that achieves 81% balanced accuracy on our realistic test set.We illustrate the framework\u2019s feasibility through a prototype interface. 
Even though model performance at discord QG still lags human performance by more than 15%, generated questions are judged to be more interesting than factoid questions and can reveal differences in the level of detail, sentiment, and reasoning of sources in news coverage. Code is available at https://github.com/Salesforce/discord_questions.", + "author": "Philippe Laban; Chien-Sheng Wu; Lidiya Murakhovs\u2019ka; Xiang Chen; Caiming Xiong", + "authorids": "/p/philippe-laban/; /c/chien-sheng-wu/; /l/lidiya-murakhovska/; /x/xiang-chen/; /c/caiming-xiong/", + "bibtex": "@inproceedings{laban-etal-2022-discord,\n title = \"Discord Questions: A Computational Approach To Diversity Analysis in News Coverage\",\n author = \"Laban, Philippe and\n Wu, Chien-Sheng and\n Murakhovs{'}ka, Lidiya and\n Chen, Xiang and\n Xiong, Caiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.380/\",\n doi = \"10.18653/v1/2022.findings-emnlp.380\",\n pages = \"5180--5194\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.380.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.380/", + "pdf_size": 1166616, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1553316742164950700&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "Salesforce AI Research\u2662; Salesforce AI Research\u2662; Salesforce AI Research\u2662; UCLA\u2663; Salesforce AI Research\u2662", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com; ;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com; ;salesforce.com", + "github": "https://github.com/Salesforce/discord_questions", + "project": "", + "author_num": 5, + 
"aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Salesforce;University of California, Los Angeles", + "aff_unique_dep": "Salesforce AI Research;", + "aff_unique_url": "https://research.salesforce.com;https://www.ucla.edu", + "aff_unique_abbr": "Salesforce AI;UCLA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.806", + "title": "Discourse Comprehension: A Question Answering Framework to Represent Sentence Connections", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While there has been substantial progress in text comprehension through simple factoid question answering, more holistic comprehension of a discourse still presents a major challenge (Dunietz et al., 2020). Someone critically reflecting on a text as they read it will pose curiosity-driven, often open-ended questions, which reflect deep understanding of the content and require complex reasoning to answer (Ko et al., 2020; Westera et al., 2020). A key challenge in building and evaluating models for this type of discourse comprehension is the lack of annotated data, especially since collecting answers to such questions requires high cognitive load for annotators.This paper presents a novel paradigm that enables scalable data collection targeting the comprehension of news documents, viewing these questions through the lens of discourse. The resulting corpus, DCQA (Discourse Comprehension by Question Answering), captures both discourse and semantic links between sentences in the form of free-form, open-ended questions. On an evaluation set that we annotated on questions from Ko et al. (2020), we show that DCQA provides valuable supervision for answering open-ended questions. 
We additionally design pre-training methods utilizing existing question-answering resources, and use synthetic data to accommodate unanswerable questions.", + "author": "Wei-Jen Ko; Cutter Dalton; Mark Simmons; Eliza Fisher; Greg Durrett; Junyi Jessy Li", + "authorids": "/w/wei-jen-ko/; /c/cutter-dalton/; /m/mark-simmons/; /e/eliza-fisher/; /g/greg-durrett/; /j/junyi-jessy-li/", + "bibtex": "@inproceedings{ko-etal-2022-discourse,\n title = \"Discourse Comprehension: A Question Answering Framework to Represent Sentence Connections\",\n author = \"Ko, Wei-Jen and\n Dalton, Cutter and\n Simmons, Mark and\n Fisher, Eliza and\n Durrett, Greg and\n Li, Junyi Jessy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.806/\",\n doi = \"10.18653/v1/2022.emnlp-main.806\",\n pages = \"11752--11764\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.806.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.806/", + "pdf_size": 617057, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1045881977331737034&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Computer Science, The University of Texas at Austin; Linguistics, University of Colorado Boulder; Linguistics, University of California San Diego; Linguistics, The University of Texas at Austin; Computer Science, The University of Texas at Austin; Linguistics, The University of Texas at Austin", + "aff_domain": "utexas.edu;colorado.edu;ucsd.edu;utexas.edu;cs.utexas.edu;utexas.edu", + "email": "utexas.edu;colorado.edu;ucsd.edu;utexas.edu;cs.utexas.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;0;0", 
+ "aff_unique_norm": "The University of Texas at Austin;University of Colorado Boulder;University of California, San Diego", + "aff_unique_dep": "Computer Science;Linguistics;Linguistics", + "aff_unique_url": "https://www.utexas.edu;https://www.colorado.edu;https://ucsd.edu", + "aff_unique_abbr": "UT Austin;CU Boulder;UCSD", + "aff_campus_unique_index": "0;1;2;0;0;0", + "aff_campus_unique": "Austin;Boulder;San Diego", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.710", + "title": "Discourse Context Predictability Effects in Hindi Word Order", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We test the hypothesis that discourse predictability influences Hindi syntactic choice. While prior work has shown that a number of factors (e.g., information status, dependency length, and syntactic surprisal) influence Hindi word order preferences, the role of discourse predictability is underexplored in the literature. Inspired by prior work on syntactic priming, we investigate how the words and syntactic structures in a sentence influence the word order of the following sentences. Specifically, we extract sentences from the Hindi-Urdu Treebank corpus (HUTB), permute the preverbal constituents of those sentences, and build a classifier to predict which sentences actually occurred in the corpus against artificially generated distractors. The classifier uses a number of discourse-based features and cognitive features to make its predictions, including dependency length, surprisal, and information status. We find that information status and LSTM-based discourse predictability influence word order choices, especially for non-canonical object-fronted orders. 
We conclude by situating our results within the broader syntactic priming literature.", + "author": "Sidharth Ranjan; Marten van Schijndel; Sumeet Agarwal; Rajakrishnan Rajkumar", + "authorids": "/s/sidharth-ranjan/; /m/marten-van-schijndel/; /s/sumeet-agarwal/; /r/rajakrishnan-rajkumar/", + "bibtex": "@inproceedings{ranjan-etal-2022-discourse,\n title = \"Discourse Context Predictability Effects in {H}indi Word Order\",\n author = \"Ranjan, Sidharth and\n van Schijndel, Marten and\n Agarwal, Sumeet and\n Rajkumar, Rajakrishnan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.710/\",\n doi = \"10.18653/v1/2022.emnlp-main.710\",\n pages = \"10390--10406\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.710.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.710/", + "pdf_size": 421171, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7430972663195535601&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "IIT Delhi; Cornell University; IIT Delhi; IISER Bhopal", + "aff_domain": "gmail.com;cornell.edu;iitd.ac.in;iiserb.ac.in", + "email": "gmail.com;cornell.edu;iitd.ac.in;iiserb.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Indian Institute of Technology Delhi;Cornell University;Indian Institute of Science Education and Research", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.iitd.ac.in;https://www.cornell.edu;https://www.iiserbhopal.ac.in", + "aff_unique_abbr": "IITD;Cornell;IISER", + "aff_campus_unique_index": "0;0;2", + "aff_campus_unique": "Delhi;;Bhopal", + "aff_country_unique_index": "0;1;0;0", + 
"aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.303", + "title": "Discourse-Aware Soft Prompting for Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current efficient fine-tuning methods(e.g., adapters, prefix-tuning, etc.) have optimized conditional text generation via training a small set of extra parameters of the neural language model, while freezing the rest for efficiency. While showing strong performance on some generation tasks, they don\u2019t generalize across all generation tasks. We show that soft-prompt based conditional text generation can be improved with simple and efficient methods that simulate modeling the discourse structure of human written text.We investigate two design choices: First, we apply hierarchical blocking on the prefix parameters to simulate a higher-level discourse structure of human written text. Second, we apply attention sparsity on the prefix parameters at different layers of the network and learn sparse transformations on the softmax-function. 
We show that structured design of prefix parameters yields more coherent, faithful and relevant generations than the baseline prefix-tuning on all generation tasks.", + "author": "Marjan Ghazvininejad; Vladimir Karpukhin; Vera Gor; Asli Celikyilmaz", + "authorids": "/m/marjan-ghazvininejad/; /v/vladimir-karpukhin/; /v/vera-gor/; /a/asli-celikyilmaz/", + "bibtex": "@inproceedings{ghazvininejad-etal-2022-discourse,\n title = \"Discourse-Aware Soft Prompting for Text Generation\",\n author = \"Ghazvininejad, Marjan and\n Karpukhin, Vladimir and\n Gor, Vera and\n Celikyilmaz, Asli\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.303/\",\n doi = \"10.18653/v1/2022.emnlp-main.303\",\n pages = \"4570--4589\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.303.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.303/", + "pdf_size": 3939909, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15241658178001811774&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.228", + "title": "Discovering Differences in the Representation of People using Contextualized Semantic Axes", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A common paradigm for identifying semantic differences across social and temporal contexts is the use of static word embeddings and their distances. In particular, past work has compared embeddings against \u201csemantic axes\u201d that represent two opposing concepts. 
We extend this paradigm to BERT embeddings, and construct contextualized axes that mitigate the pitfall where antonyms have neighboring representations. We validate and demonstrate these axes on two people-centric datasets: occupations from Wikipedia, and multi-platform discussions in extremist, men\u2019s communities over fourteen years. In both studies, contextualized semantic axes can characterize differences among instances of the same word type. In the latter study, we show that references to women and the contexts around them have become more detestable over time.", + "author": "Li Lucy; Divya Tadimeti; David Bamman", + "authorids": "/l/li-lucy/; /d/divya-tadimeti/; /d/david-bamman/", + "bibtex": "@inproceedings{lucy-etal-2022-discovering,\n title = \"Discovering Differences in the Representation of People using Contextualized Semantic Axes\",\n author = \"Lucy, Li and\n Tadimeti, Divya and\n Bamman, David\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.228/\",\n doi = \"10.18653/v1/2022.emnlp-main.228\",\n pages = \"3477--3494\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.228.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.228/", + "pdf_size": 1676338, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16159325400134843445&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of California, Berkeley; University of California, Berkeley; University of California, Berkeley", + "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu", + "email": "berkeley.edu;berkeley.edu;berkeley.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": 
"0;0;0", + "aff_unique_norm": "University of California, Berkeley", + "aff_unique_dep": "", + "aff_unique_url": "https://www.berkeley.edu", + "aff_unique_abbr": "UC Berkeley", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.513", + "title": "Discovering Language-neutral Sub-networks in Multilingual Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multilingual pre-trained language models transfer remarkably well on cross-lingual downstream tasks. However, the extent to which they learn language-neutral representations (i.e., shared representations that encode similar phenomena across languages), and the effect of such representations on cross-lingual transfer performance, remain open questions.In this work, we conceptualize language neutrality of multilingual models as a function of the overlap between language-encoding sub-networks of these models. We employ the lottery ticket hypothesis to discover sub-networks that are individually optimized for various languages and tasks. 
Our evaluation across three distinct tasks and eleven typologically-diverse languages demonstrates that sub-networks for different languages are topologically similar (i.e., language-neutral), making them effective initializations for cross-lingual transfer with limited performance degradation.", + "author": "Negar Foroutan; Mohammadreza Banaei; R\u00e9mi Lebret; Antoine Bosselut; Karl Aberer", + "authorids": "/n/negar-foroutan/; /m/mohammadreza-banaei/; /r/remi-lebret/; /a/antoine-bosselut/; /k/karl-aberer/", + "bibtex": "@inproceedings{foroutan-etal-2022-discovering,\n title = \"Discovering Language-neutral Sub-networks in Multilingual Language Models\",\n author = \"Foroutan, Negar and\n Banaei, Mohammadreza and\n Lebret, R{\\'e}mi and\n Bosselut, Antoine and\n Aberer, Karl\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.513/\",\n doi = \"10.18653/v1/2022.emnlp-main.513\",\n pages = \"7560--7575\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.513.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.513/", + "pdf_size": 2333402, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17155905963244979240&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "EPFL; EPFL; EPFL; EPFL; EPFL", + "aff_domain": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", + "email": "epfl.ch;epfl.ch;epfl.ch;epfl.ch;epfl.ch", + "github": "https://github.com/negar-foroutan/multiLMs-lang-neutral-subnets", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": "", + "aff_unique_url": "https://www.epfl.ch", 
+ "aff_unique_abbr": "EPFL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.emnlp-main.379", + "title": "Discovering Low-rank Subspaces for Language-agnostic Multilingual Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large pretrained multilingual language models (ML-LMs) have shown remarkable capabilities of zero-shot cross-lingual transfer, without direct cross-lingual supervision. While these results are promising, follow-up works found that, within the multilingual embedding spaces, there exists strong language identity information which hinders the expression of linguistic factors shared across languages. For semantic tasks like cross-lingual sentence retrieval, it is desired to remove such language identity signals to fully leverage semantic information. In this work, we provide a novel view of projecting away language-specific factors from a multilingual embedding space. Specifically, we discover that there exists a low-rank subspace that primarily encodes information irrelevant to semantics (e.g., syntactic information). To identify this subspace, we present a simple but effective unsupervised method based on singular value decomposition with multiple monolingual corpora as input. Once the subspace is found, we can directly project the original embeddings into the null space to boost language agnosticism without finetuning. We systematically evaluate our method on various tasks including the challenging language-agnostic QA retrieval task. 
Empirical results show that applying our method consistently leads to improvements over commonly used ML-LMs.", + "author": "Zhihui Xie; Handong Zhao; Tong Yu; Shuai Li", + "authorids": "/z/zhihui-xie/; /h/handong-zhao/; /t/tong-yu/; /s/shuai-li/", + "bibtex": "@inproceedings{xie-etal-2022-discovering,\n title = \"Discovering Low-rank Subspaces for Language-agnostic Multilingual Representations\",\n author = \"Xie, Zhihui and\n Zhao, Handong and\n Yu, Tong and\n Li, Shuai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.379/\",\n doi = \"10.18653/v1/2022.emnlp-main.379\",\n pages = \"5617--5633\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.379.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.379/", + "pdf_size": 2311180, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10135203024165377967&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Shanghai Jiao Tong University; Adobe Research; Adobe Research; Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;adobe.com;adobe.com;sjtu.edu.cn", + "email": "sjtu.edu.cn;adobe.com;adobe.com;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "Shanghai Jiao Tong University;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www.sjtu.edu.cn;https://research.adobe.com", + "aff_unique_abbr": "SJTU;Adobe", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.354", + "title": "Discrete Cross-Modal Alignment Enables 
Zero-Shot Speech Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "End-to-end Speech Translation (ST) aims at translating the source language speech into target language text without generating the intermediate transcriptions. However, the training of end-to-end methods relies on parallel ST data, which are difficult and expensive to obtain. Fortunately, the supervised data for automatic speech recognition (ASR) and machine translation (MT) are usually more accessible, making zero-shot speech translation a potential direction. Existing zero-shot methods fail to align the two modalities of speech and text into a shared semantic space, resulting in much worse performance compared to the supervised ST methods. In order to enable zero-shot ST, we propose a novel Discrete Cross-Modal Alignment (DCMA) method that employs a shared discrete vocabulary space to accommodate and match both modalities of speech and text. Specifically, we introduce a vector quantization module to discretize the continuous representations of speech and text into a finite set of virtual tokens, and use ASR data to map corresponding speech and text to the same virtual token in a shared codebook. This way, source language speech can be embedded in the same semantic space as the source language text, which can be then transformed into target language text with an MT module. 
Experiments on multiple language pairs demonstrate that our zero-shot ST method significantly improves the SOTA, and even performers on par with the strong supervised ST baselines.", + "author": "Chen Wang; Yuchen Liu; Boxing Chen; Jiajun Zhang; Wei Luo; Zhongqiang Huang; Chengqing Zong", + "authorids": "/c/chen-wang/; /y/yuchen-liu/; /b/boxing-chen/; /j/jiajun-zhang/; /w/wei-luo/; /z/zhongqiang-huang/; /c/chengqing-zong/", + "bibtex": "@inproceedings{wang-etal-2022-discrete,\n title = \"Discrete Cross-Modal Alignment Enables Zero-Shot Speech Translation\",\n author = \"Wang, Chen and\n Liu, Yuchen and\n Chen, Boxing and\n Zhang, Jiajun and\n Luo, Wei and\n Huang, Zhongqiang and\n Zong, Chengqing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.354/\",\n doi = \"10.18653/v1/2022.emnlp-main.354\",\n pages = \"5291--5302\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.354.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.354/", + "pdf_size": 833019, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8548707174855620124&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": "National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences; Machine Intelligence Technology Lab, Alibaba DAMO Academy; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, 
University of Chinese Academy of Sciences; Machine Intelligence Technology Lab, Alibaba DAMO Academy; Machine Intelligence Technology Lab, Alibaba DAMO Academy; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences", + "aff_domain": "ia.ac.cn;ia.ac.cn;alibaba-inc.com;nlpr.ia.ac.cn;alibaba-inc.com;alibaba-inc.com;nlpr.ia.ac.cn", + "email": "ia.ac.cn;ia.ac.cn;alibaba-inc.com;nlpr.ia.ac.cn;alibaba-inc.com;alibaba-inc.com;nlpr.ia.ac.cn", + "github": "https://github.com/ZNLP/zero-shot-st", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;2;0+1;2;2;0+1", + "aff_unique_norm": "National Laboratory of Pattern Recognition;University of Chinese Academy of Sciences;Alibaba DAMO Academy", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence;Machine Intelligence Technology Lab", + "aff_unique_url": ";http://www.ucas.ac.cn;https://damo.alibaba.com", + "aff_unique_abbr": ";UCAS;Alibaba DAMO", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.383", + "title": "Disentangling Task Relations for Few-shot Text Classification via Self-Supervised Hierarchical Task Clustering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Few-Shot Text Classification (FSTC) imitates humans to learn a new text classifier efficiently with only few examples, by leveraging prior knowledge from historical tasks. However, most prior works assume that all the tasks are sampled from a single data source, which cannot adapt to real-world scenarios where tasks are heterogeneous and lie in different distributions. As such, existing methods may suffer from their globally knowledge-shared mechanisms to handle the task heterogeneity. 
On the other hand, inherent task relationships are not explicitly captured, making task knowledge unorganized and hard to transfer to new tasks. Thus, we explore a new FSTC setting where tasks can come from a diverse range of data sources. To address the task heterogeneity, we propose a self-supervised hierarchical task clustering (SS-HTC) method. SS-HTC not only customizes the cluster-specific knowledge by dynamically organizing heterogeneous tasks into different clusters in hierarchical levels but also disentangles the underlying relations between tasks to improve the interpretability. Empirically, extensive experiments on five public FSTC benchmark datasets demonstrate the effectiveness of SS-HTC.", + "author": "Juan Zha; Zheng Li; Ying Wei; Yu Zhang", + "authorids": "/j/juan-zha/; /z/zheng-li/; /y/ying-wei/; /y/yu-zhang/", + "bibtex": "@inproceedings{zha-etal-2022-disentangling,\n title = \"Disentangling Task Relations for Few-shot Text Classification via Self-Supervised Hierarchical Task Clustering\",\n author = \"Zha, Juan and\n Li, Zheng and\n Wei, Ying and\n Zhang, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.383/\",\n doi = \"10.18653/v1/2022.findings-emnlp.383\",\n pages = \"5236--5247\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.383.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.383/", + "pdf_size": 625376, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1624586435667652730&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California, CA, USA; Amazon.com Inc, CA, USA; City University of Hong Kong, Hong Kong, China; Southern 
University of Science and Technology, China", + "aff_domain": "usc.com;amazon.com;cityu.edu.hk;gmail.com", + "email": "usc.com;amazon.com;cityu.edu.hk;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Southern California;Amazon.com Inc;City University of Hong Kong;Southern University of Science and Technology", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.usc.edu;https://www.amazon.com;https://www.cityu.edu.hk;https://www.sustech.edu.cn", + "aff_unique_abbr": "USC;Amazon;CityU;SUSTech", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.591", + "title": "Disentangling Uncertainty in Machine Translation Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Trainable evaluation metrics for machine translation (MT) exhibit strong correlation with human judgements, but they are often hard to interpret and might produce unreliable scores under noisy or out-of-domain data. Recent work has attempted to mitigate this with simple uncertainty quantification techniques (Monte Carlo dropout and deep ensembles), however these techniques (as we show) are limited in several ways \u2013 for example, they are unable to distinguish between different kinds of uncertainty, and they are time and memory consuming. In this paper, we propose more powerful and efficient uncertainty predictors for MT evaluation, and we assess their ability to target different sources of aleatoric and epistemic uncertainty. 
To this end, we develop and compare training objectives for the COMET metric to enhance it with an uncertainty prediction output, including heteroscedastic regression, divergence minimization, and direct uncertainty prediction.Our experiments show improved results on uncertainty prediction for the WMT metrics task datasets, with a substantial reduction in computational costs. Moreover, they demonstrate the ability of these predictors to address specific uncertainty causes in MT evaluation, such as low quality references and out-of-domain data.", + "author": "Chrysoula Zerva; Taisiya Glushkova; Ricardo Rei; Andr\u00e9 F. T. Martins", + "authorids": "/c/chrysoula-zerva/; /t/taisiya-glushkova/; /r/ricardo-rei/; /a/andre-f-t-martins/", + "bibtex": "@inproceedings{zerva-etal-2022-disentangling,\n title = \"Disentangling Uncertainty in Machine Translation Evaluation\",\n author = \"Zerva, Chrysoula and\n Glushkova, Taisiya and\n Rei, Ricardo and\n Martins, Andr{\\'e} F. T.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.591/\",\n doi = \"10.18653/v1/2022.emnlp-main.591\",\n pages = \"8622--8641\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.591.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.591/", + "pdf_size": 855519, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5162646824498462075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Instituto de Telecomunica\u00e7\u00f5es + Instituto Superior T\u00e9cnico & LUMLIS (Lisbon ELLIS Unit); Instituto de Telecomunica\u00e7\u00f5es + Instituto Superior T\u00e9cnico & LUMLIS (Lisbon ELLIS Unit); Unbabel + INESC-ID + Instituto 
Superior T\u00e9cnico & LUMLIS (Lisbon ELLIS Unit); Instituto de Telecomunica\u00e7\u00f5es + Unbabel + Instituto Superior T\u00e9cnico & LUMLIS (Lisbon ELLIS Unit)", + "aff_domain": "tecnico.ulisboa.pt;tecnico.ulisboa.pt;tecnico.ulisboa.pt;tecnico.ulisboa.pt", + "email": "tecnico.ulisboa.pt;tecnico.ulisboa.pt;tecnico.ulisboa.pt;tecnico.ulisboa.pt", + "github": "https://github.com/deep-spin/uncertainties_MT_eval", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2+3+1;0+2+1", + "aff_unique_norm": "Instituto de Telecomunica\u00e7\u00f5es;Instituto Superior T\u00e9cnico;Unbabel;INESC-ID", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.it.pt;https://www.ist.utl.pt;https://www.unbabel.com;https://www.inesc-id.pt", + "aff_unique_abbr": ";IST;;INESC-ID", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Lisbon", + "aff_country_unique_index": "0+0;0+0;0+0+0;0+0+0", + "aff_country_unique": "Portugal" + }, + { + "id": "2022.emnlp-main.152", + "title": "Distill The Image to Nowhere: Inversion Knowledge Distillation for Multimodal Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Past works on multimodal machine translation (MMT) elevate bilingual setup by incorporating additional aligned vision information.However, an image-must requirement of the multimodal dataset largely hinders MMT\u2019s development \u2014 namely that it demands an aligned form of [image, source text, target text].This limitation is generally troublesome during the inference phase especially when the aligned image is not provided as in the normal NMT setup.Thus, in this work, we introduce IKD-MMT, a novel MMT framework to support the image-free inference phase via an inversion knowledge distillation scheme.In particular, a multimodal feature generator is executed with a knowledge distillation module, which directly generates the multimodal feature from (only) source texts as the input.While there have been a few 
prior works entertaining the possibility to support image-free inference for machine translation, their performances have yet to rival the image-must translation.In our experiments, we identify our method as the first image-free approach to comprehensively rival or even surpass (almost) all image-must frameworks, and achieved the state-of-the-art result on the often-used Multi30k benchmark. Our code and data are availableat: https://github.com/pengr/IKD-mmt/tree/master..", + "author": "Ru Peng; Yawen Zeng; Jake Zhao", + "authorids": "/r/ru-peng/; /y/yawen-zeng/; /j/jake-zhao/", + "bibtex": "@inproceedings{peng-etal-2022-distill,\n title = \"Distill The Image to Nowhere: Inversion Knowledge Distillation for Multimodal Machine Translation\",\n author = \"Peng, Ru and\n Zeng, Yawen and\n Zhao, Jake\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.152/\",\n doi = \"10.18653/v1/2022.emnlp-main.152\",\n pages = \"2379--2390\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.152.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.152/", + "pdf_size": 725952, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14189318654140183061&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Zhejiang University; Tencent WeChat; Zhejiang University", + "aff_domain": "gmail.com;gmail.com;zju.edu.cn", + "email": "gmail.com;gmail.com;zju.edu.cn", + "github": "https://github.com/pengr/IKD-mmt/tree/master", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Zhejiang University;Tencent", + "aff_unique_dep": ";WeChat", + "aff_unique_url": 
"https://www.zju.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "ZJU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.370", + "title": "Distillation-Resistant Watermarking for Model Protection in NLP", + "track": "main", + "status": "finding", + "award": false, + "abstract": "How can we protect the intellectual property of trained NLP models? Modern NLP models are prone to stealing by querying and distilling from their publicly exposed APIs. However, existing protection methods such as watermarking only work for images but are not applicable to text. We propose Distillation-Resistant Watermarking (DRW), a novel technique to protect NLP models from being stolen via distillation. DRW protects a model by injecting watermarks into the victim\u2019s prediction probability corresponding to a secret key and is able to detect such a key by probing a suspect model. We prove that a protected model still retains the original accuracy within a certain bound. We evaluate DRW on a diverse set of NLP tasks including text classification, part-of-speech tagging, and named entity recognition. 
Experiments show that DRW protects the original model and detects stealing suspects at 100% mean average precision for all four tasks while the prior method fails on two.", + "author": "Xuandong Zhao; Lei Li; Yu-Xiang Wang", + "authorids": "/x/xuandong-zhao/; /l/lei-li/; /y/yu-xiang-wang/", + "bibtex": "@inproceedings{zhao-etal-2022-distillation,\n title = \"Distillation-Resistant Watermarking for Model Protection in {NLP}\",\n author = \"Zhao, Xuandong and\n Li, Lei and\n Wang, Yu-Xiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.370/\",\n doi = \"10.18653/v1/2022.findings-emnlp.370\",\n pages = \"5044--5055\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.370.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.370/", + "pdf_size": 2790702, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13528376733102110230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of California, Santa Barbara; University of California, Santa Barbara; University of California, Santa Barbara", + "aff_domain": "cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "github": "https://github.com/XuandongZhao/DRW2021", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, Santa Barbara", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsb.edu", + "aff_unique_abbr": "UCSB", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Santa Barbara", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.608", + "title": "Distilled 
Dual-Encoder Model for Vision-Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "On vision-language understanding (VLU) tasks, fusion-encoder vision-language models achieve superior results but sacrifice efficiency because of the simultaneous encoding of images and text. On the contrary, the dual encoder model that separately encodes images and text has the advantage in efficiency, while failing on VLU tasks due to the lack of deep cross-modal interactions. To get the best of both worlds, we propose DiDE, a framework that distills the knowledge of the fusion-encoder teacher model into the dual-encoder student model. Since the cross-modal interaction is the key to the superior performance of teacher model but is absent in the student model, we encourage the student not only to mimic the predictions of teacher, but also to calculate the cross-modal attention distributions and align with the teacher. Experimental results demonstrate that DiDE is competitive with the fusion-encoder teacher model in performance (only a 1% drop) while enjoying 4 times faster inference. 
Further analyses reveal that the proposed cross-modal attention distillation is crucial to the success of our framework.", + "author": "Zekun Wang; Wenhui Wang; Haichao Zhu; Ming Liu; Bing Qin; Furu Wei", + "authorids": "/z/zekun-wang/; /w/wenhui-wang/; /h/haichao-zhu/; /m/ming-liu/; /b/bing-qin/; /f/furu-wei/", + "bibtex": "@inproceedings{wang-etal-2022-distilled,\n title = \"Distilled Dual-Encoder Model for Vision-Language Understanding\",\n author = \"Wang, Zekun and\n Wang, Wenhui and\n Zhu, Haichao and\n Liu, Ming and\n Qin, Bing and\n Wei, Furu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.608/\",\n doi = \"10.18653/v1/2022.emnlp-main.608\",\n pages = \"8901--8913\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.608.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.608/", + "pdf_size": 896442, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17390284033829320305&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Harbin Institute of Technology; Microsoft Research; Harbin Institute of Technology; Harbin Institute of Technology+Peng Cheng Laboratory; Harbin Institute of Technology+Peng Cheng Laboratory; Microsoft Research", + "aff_domain": "ir.hit.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;microsoft.com", + "email": "ir.hit.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;microsoft.com", + "github": "https://github.com/kugwzk/DiDE", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0+2;0+2;1", + "aff_unique_norm": "Harbin Institute of Technology;Microsoft Corporation;Peng Cheng Laboratory", + "aff_unique_dep": ";Microsoft 
Research;", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.microsoft.com/en-us/research;http://www.pcl.ac.cn", + "aff_unique_abbr": "HIT;MSR;PCL", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;1;0;0+0;0+0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.236", + "title": "Distilling Causal Effect from Miscellaneous Other-Class for Continual Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Continual Learning for Named Entity Recognition (CL-NER) aims to learn a growing number of entity types over time from a stream of data. However, simply learning Other-Class in the same way as new entity types amplifies the catastrophic forgetting and leads to a substantial performance drop. The main cause behind this is that Other-Class samples usually contain old entity types, and the old knowledge in these Other-Class samples is not preserved properly. Thanks to the causal inference, we identify that the forgetting is caused by the missing causal effect from the old data.To this end, we propose a unified causal framework to retrieve the causality from both new entity types and Other-Class.Furthermore, we apply curriculum learning to mitigate the impact of label noise and introduce a self-adaptive weight for balancing the causal effects between new entity types and Other-Class. Experimental results on three benchmark datasets show that our method outperforms the state-of-the-art method by a large margin. 
Moreover, our method can be combined with the existing state-of-the-art methods to improve the performance in CL-NER.", + "author": "Junhao Zheng; Zhanxian Liang; Haibin Chen; Qianli Ma", + "authorids": "/j/junhao-zheng/; /z/zhanxian-liang/; /h/haibin-chen/; /q/qianli-ma/", + "bibtex": "@inproceedings{zheng-etal-2022-distilling,\n title = \"Distilling Causal Effect from Miscellaneous Other-Class for Continual Named Entity Recognition\",\n author = \"Zheng, Junhao and\n Liang, Zhanxian and\n Chen, Haibin and\n Ma, Qianli\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.236/\",\n doi = \"10.18653/v1/2022.emnlp-main.236\",\n pages = \"3602--3615\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.236.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.236/", + "pdf_size": 1204508, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7552441113317697754&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China", + "aff_domain": "outlook.com; ; ;scut.edu.cn", + "email": "outlook.com; ; ;scut.edu.cn", + "github": "https://github.com/zzz47zzz/CFNER", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School 
of Computer Science and Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Guangzhou", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.43", + "title": "Distilling Multilingual Transformers into CNNs for Scalable Intent Classification", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "We describe an application of Knowledge Distillation used to distill and deploy multilingual Transformer models for voice assistants, enabling text classification for customers globally. Transformers have set new state-of-the-art results for tasks like intent classification, and multilingual models exploit cross-lingual transfer to allow serving requests across 100+ languages. However, their prohibitive inference time makes them impractical to deploy in real-world scenarios with low latency requirements, such as is the case of voice assistants. We address the problem of cross-architecture distillation of multilingual Transformers to simpler models, while maintaining multilinguality without performance degradation. Training multilingual student models has received little attention, and is our main focus. We show that a teacher-student framework, where the teacher\u2019s unscaled activations (logits) on unlabelled data are used to supervise student model training, enables distillation of Transformers into efficient multilingual CNN models. Our student model achieves equivalent performance as the teacher, and outperforms a similar model trained on the labelled data used to train the teacher model. 
This approach has enabled us to accurately serve global customer requests at speed (18x improvement), scale, and low cost.", + "author": "Besnik Fetahu; Akash Veeragouni; Oleg Rokhlenko; Shervin Malmasi", + "authorids": "/b/besnik-fetahu/; /a/akash-veeragouni/; /o/oleg-rokhlenko/; /s/shervin-malmasi/", + "bibtex": "@inproceedings{fetahu-etal-2022-distilling,\n title = \"Distilling Multilingual Transformers into {CNN}s for Scalable Intent Classification\",\n author = \"Fetahu, Besnik and\n Veeragouni, Akash and\n Rokhlenko, Oleg and\n Malmasi, Shervin\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.43/\",\n doi = \"10.18653/v1/2022.emnlp-industry.43\",\n pages = \"429--439\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.43.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.43/", + "pdf_size": 557072, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5757537952848418676&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Amazon.com Inc., Seattle, WA, USA; Amazon.com Inc., Seattle, WA, USA; Amazon.com Inc., Seattle, WA, USA; Amazon.com Inc., Seattle, WA, USA", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Amazon.com Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"2022.emnlp-industry.51", + "title": "Distinguish Sense from Nonsense: Out-of-Scope Detection for Virtual Assistants", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Out of Scope (OOS) detection in Conversational AI solutions enables a chatbot to handle a conversation gracefully when it is unable to make sense of the end-user query. Accurately tagging a query as out-of-domain is particularly hard in scenarios when the chatbot is not equipped to handle a topic which has semantic overlap with an existing topic it is trained on. We propose a simple yet effective OOS detection method that outperforms standard OOS detection methods in a real-world deployment of virtual assistants. We discuss the various design and deployment considerations for a cloud platform solution to train virtual assistants and deploy them at scale. Additionally, we propose a collection of datasets that replicates real-world scenarios and show comprehensive results in various settings using both offline and online evaluation metrics.", + "author": "Cheng Qian; Haode Qi; Gengyu Wang; Ladislav Kunc; Saloni Potdar", + "authorids": "/c/cheng-qian/; /h/haode-qi/; /g/gengyu-wang/; /l/ladislav-kunc/; /s/saloni-potdar/", + "bibtex": "@inproceedings{qian-etal-2022-distinguish,\n title = \"Distinguish Sense from Nonsense: Out-of-Scope Detection for Virtual Assistants\",\n author = \"Qian, Cheng and\n Qi, Haode and\n Wang, Gengyu and\n Kunc, Ladislav and\n Potdar, Saloni\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.51/\",\n doi = \"10.18653/v1/2022.emnlp-industry.51\",\n pages = \"502--511\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.51.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-industry.51/", + "pdf_size": 259338, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3598638306027388336&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "IBM Watson; IBM Watson; IBM Watson; IBM Watson; Apple Inc.", + "aff_domain": "ibm.com;ibm.com;ibm.com;ibm.com;apple.com", + "email": "ibm.com;ibm.com;ibm.com;ibm.com;apple.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "IBM;Apple Inc.", + "aff_unique_dep": "IBM Watson;", + "aff_unique_url": "https://www.ibm.com/watson;https://www.apple.com", + "aff_unique_abbr": "IBM Watson;Apple", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.532", + "title": "DivEMT: Neural Machine Translation Post-Editing Effort Across Typologically Diverse Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce DivEMT, the first publicly available post-editing study of Neural Machine Translation (NMT) over a typologically diverse set of target languages. Using a strictly controlled setup, 18 professional translators were instructed to translate or post-edit the same set of English documents into Arabic, Dutch, Italian, Turkish, Ukrainian, and Vietnamese. During the process, their edits, keystrokes, editing times and pauses were recorded, enabling an in-depth, cross-lingual evaluation of NMT quality and post-editing effectiveness. Using this new dataset, we assess the impact of two state-of-the-art NMT systems, Google Translate and the multilingual mBART-50 model, on translation productivity. We find that post-editing is consistently faster than translation from scratch. 
However, the magnitude of productivity gains varies widely across systems and languages, highlighting major disparities in post-editing effectiveness for languages at different degrees of typological relatedness to English, even when controlling for system architecture and training data size. We publicly release the complete dataset including all collected behavioral data, to foster new research on the translation capabilities of NMT systems for typologically diverse languages.", + "author": "Gabriele Sarti; Arianna Bisazza; Ana Guerberof-Arenas; Antonio Toral", + "authorids": "/g/gabriele-sarti/; /a/arianna-bisazza/; /a/ana-guerberof-arenas/; /a/antonio-toral/", + "bibtex": "@inproceedings{sarti-etal-2022-divemt,\n title = \"{D}iv{EMT}: Neural Machine Translation Post-Editing Effort Across Typologically Diverse Languages\",\n author = \"Sarti, Gabriele and\n Bisazza, Arianna and\n Guerberof-Arenas, Ana and\n Toral, Antonio\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.532/\",\n doi = \"10.18653/v1/2022.emnlp-main.532\",\n pages = \"7795--7816\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.532.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.532/", + "pdf_size": 1159064, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=443150368853516030&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Center for Language and Cognition (CLCG), University of Groningen; Center for Language and Cognition (CLCG), University of Groningen; Center for Language and Cognition (CLCG), University of Groningen; Center for Language and Cognition (CLCG), University of Groningen", + "aff_domain": 
"rug.nl;rug.nl;rug.nl;rug.nl", + "email": "rug.nl;rug.nl;rug.nl;rug.nl", + "github": "https://github.com/gsarti/divemt", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Groningen", + "aff_unique_dep": "Center for Language and Cognition (CLCG)", + "aff_unique_url": "https://www.rug.nl", + "aff_unique_abbr": "RUG", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.emnlp-main.794", + "title": "Diverse Parallel Data Synthesis for Cross-Database Adaptation of Text-to-SQL Parsers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text-to-SQL parsers typically struggle with databases unseen during the train time. Adapting Text-to-SQL parsers to new database schemas is a challenging problem owing to a vast diversity of schemas and zero availability of natural language queries in new schemas. We present ReFill, a framework for synthesizing high-quality and textually diverse parallel datasets for adapting Text-to-SQL parsers. Unlike prior methods that utilize SQL-to-Text generation, ReFill learns to retrieve-and-edit text queries in existing schemas and transfer them to the new schema. ReFill utilizes a simple method for retrieving diverse existing text, masking their schema-specific tokens, and refilling with tokens relevant to the new schema. We show that this process leads to significantly more diverse text queries than achievable by standard SQL-to-Text generation models. 
Through experiments on several databases, we show that adapting a parser by finetuning it on datasets synthesized by ReFill consistently outperforms prior data-augmentation methods.", + "author": "Abhijeet Awasthi; Ashutosh Sathe; Sunita Sarawagi", + "authorids": "/a/abhijeet-awasthi/; /a/ashutosh-sathe/; /s/sunita-sarawagi/", + "bibtex": "@inproceedings{awasthi-etal-2022-diverse,\n title = \"Diverse Parallel Data Synthesis for Cross-Database Adaptation of Text-to-{SQL} Parsers\",\n author = \"Awasthi, Abhijeet and\n Sathe, Ashutosh and\n Sarawagi, Sunita\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.794/\",\n doi = \"10.18653/v1/2022.emnlp-main.794\",\n pages = \"11548--11562\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.794.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.794/", + "pdf_size": 499771, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4597745129451632075&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Indian Institute of Technology Bombay, India; Indian Institute of Technology Bombay, India; Indian Institute of Technology Bombay, India", + "aff_domain": "cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;cse.iitb.ac.in;cse.iitb.ac.in", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Bombay", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitb.ac.in", + "aff_unique_abbr": "IIT Bombay", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Bombay", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { 
+ "id": "2022.findings-emnlp.48", + "title": "Diving Deep into Modes of Fact Hallucinations in Dialogue Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge Graph(KG) grounded conversations often use large pre-trained models and usually suffer from fact hallucination. Frequently entities with no references in knowledge sources and conversation history are introduced into responses, thus hindering the flow of the conversation\u2014existing work attempt to overcome this issue by tweaking the training procedure or using a multi-step refining method. However, minimal effort is put into constructing an entity-level hallucination detection system, which would provide fine-grained signals that control fallacious content while generating responses. As a first step to address this issue, we dive deep to identify various modes of hallucination in KG-grounded chatbots through human feedback analysis. Secondly, we propose a series of perturbation strategies to create a synthetic dataset named FADE (FActual Dialogue Hallucination DEtection Dataset). 
Finally, we conduct comprehensive data analyses and create multiple baseline models for hallucination detection to compare against human-verified data and already established benchmarks.", + "author": "Souvik Das; Sougata Saha; Rohini Srihari", + "authorids": "/s/souvik-das/; /s/sougata-saha/; /r/rohini-k-srihari/", + "bibtex": "@inproceedings{das-etal-2022-diving,\n title = \"Diving Deep into Modes of Fact Hallucinations in Dialogue Systems\",\n author = \"Das, Souvik and\n Saha, Sougata and\n Srihari, Rohini\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.48/\",\n doi = \"10.18653/v1/2022.findings-emnlp.48\",\n pages = \"684--699\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.48.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.48/", + "pdf_size": 1902138, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15192821375665660508&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/souvikdgp16/FADE", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.275", + "title": "Do Charge Prediction Models Learn Legal Theory?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The charge prediction task aims to predict the charge for a case given its fact description. 
Recent models have already achieved impressive accuracy in this task, however, little is understood about the mechanisms they use to perform the judgment.For practical applications, a charge prediction model should conform to the certain legal theory in civil law countries, as under the framework of civil law, all cases are judged according to certain local legal theories. In China, for example, nearly all criminal judges make decisions based on the Four Elements Theory (FET).In this paper, we argue that trustworthy charge prediction models should take legal theories into consideration, and standing on prior studies in model interpretation, we propose three principles for trustworthy models should follow in this task, which are sensitive, selective, and presumption of innocence.We further design a new framework to evaluate whether existing charge prediction models learn legal theories. Our findings indicate that, while existing charge prediction models meet the selective principle on a benchmark dataset, most of them are still not sensitive enough and do not satisfy the presumption of innocence. 
Our code and dataset are released at https://github.com/ZhenweiAn/EXP_LJP.", + "author": "Zhenwei An; Quzhe Huang; Cong Jiang; Yansong Feng; Dongyan Zhao", + "authorids": "/z/zhenwei-an/; /q/quzhe-huang/; /c/cong-jiang/; /y/yansong-feng/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{an-etal-2022-charge,\n title = \"Do Charge Prediction Models Learn Legal Theory?\",\n author = \"An, Zhenwei and\n Huang, Quzhe and\n Jiang, Cong and\n Feng, Yansong and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.275/\",\n doi = \"10.18653/v1/2022.findings-emnlp.275\",\n pages = \"3757--3768\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.275.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.275/", + "pdf_size": 2026963, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11651790958084020367&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": "Wangxuan Institute of Computer Technology, Peking University + School of Software & Microelectronics, Peking University; Wangxuan Institute of Computer Technology, Peking University + School of Intelligence Science and Technology, Peking University; Peking University Law School + Institute for Artificial Intelligence, Peking University; Wangxuan Institute of Computer Technology, Peking University + The MOE Key Laboratory of Computational Linguistics, Peking University; Wangxuan Institute of Computer Technology, Peking University + Institute for Artificial Intelligence, Peking University + The MOE Key Laboratory of Computational Linguistics, Peking University", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + 
"email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/ZhenweiAn/EXP_LJP", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Wangxuan Institute of Computer Technology", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.752", + "title": "Do Children Texts Hold The Key To Commonsense Knowledge?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Compiling comprehensive repositories of commonsense knowledge is a long-standing problem in AI. Many concerns revolve around the issue of reporting bias, i.e., that frequency in text sources is not a good proxy for relevance or truth. This paper explores whether children\u2019s texts hold the key to commonsense knowledge compilation, based on the hypothesis that such content makes fewer assumptions on the reader\u2019s knowledge, and therefore spells out commonsense more explicitly. An analysis with several corpora shows that children\u2019s texts indeed contain much more, and more typical commonsense assertions. Moreover, experiments show that this advantage can be leveraged in popular language-model-based commonsense knowledge extraction settings, where task-unspecific fine-tuning on small amounts of children texts (childBERT) already yields significant improvements. 
This provides a refreshing perspective different from the common trend of deriving progress from ever larger models and corpora.", + "author": "Julien Romero; Simon Razniewski", + "authorids": "/j/julien-romero/; /s/simon-razniewski/", + "bibtex": "@inproceedings{romero-razniewski-2022-children,\n title = \"Do Children Texts Hold The Key To Commonsense Knowledge?\",\n author = \"Romero, Julien and\n Razniewski, Simon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.752/\",\n doi = \"10.18653/v1/2022.emnlp-main.752\",\n pages = \"10954--10959\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.752.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.752/", + "pdf_size": 205353, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17151133890867312406&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "T\u00e9l\u00e9com SudParis; Max Planck Institute for Informatics", + "aff_domain": "telecom-sudparis.eu;mpi-inf.mpg.de", + "email": "telecom-sudparis.eu;mpi-inf.mpg.de", + "github": "", + "project": "https://www.mpi-inf.mpg.de/children-texts-for-commonsense", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "T\u00e9l\u00e9com SudParis;Max Planck Institute for Informatics", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.telecom-sudparis.eu;https://mpi-inf.mpg.de", + "aff_unique_abbr": "TSP;MPII", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "France;Germany" + }, + { + "id": "2022.findings-emnlp.128", + "title": "Do Language Models Understand Measurements?", + "track": "main", + 
"status": "finding", + "award": false, + "abstract": "Recent success of pre-trained language models (PLMs) has stimulated interest in their ability to understand and work with numbers. Yet, the numerical reasoning over measurements has not been formally studied despite their importance. In this study, we show that PLMs lack the capability required for reasoning over measurements. Furthermore, we find that a language model trained on a measurement-rich corpus shows better performance on understanding measurements. We propose a simple embedding strategy to better distinguish between numbers and units, which leads to a significant improvement in the probing tasks.", + "author": "Sungjin Park; Seungwoo Ryu; Edward Choi", + "authorids": "/s/sungjin-park/; /s/seungwoo-ryu/; /e/edward-choi/", + "bibtex": "@inproceedings{park-etal-2022-language,\n title = \"Do Language Models Understand Measurements?\",\n author = \"Park, Sungjin and\n Ryu, Seungwoo and\n Choi, Edward\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.128/\",\n doi = \"10.18653/v1/2022.findings-emnlp.128\",\n pages = \"1782--1792\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.128.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.128/", + "pdf_size": 304386, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10614583294792012995&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.206", + "title": "Do Text-to-Text Multi-Task Learners Suffer from Task Conflict?", + "track": "main", + "status": 
"finding", + "award": false, + "abstract": "Traditional multi-task learning architectures learn a single model across multiple tasks through a shared encoder followed by task-specific decoders. Learning these models often requires specialized training algorithms that address task-conflict in the shared parameter updates, which otherwise can lead to negative transfer. A new type of multi-task learning within NLP homogenizes multi-task architectures as a shared encoder and language model decoder, which does surprisingly well across a range of diverse tasks. Does this new architecture suffer from task-conflicts that require specialized training algorithms? We study how certain factors in the shift towards text-to-text models affects multi-task conflict and negative transfer, finding that both directional conflict and transfer are surprisingly constant across architectures.", + "author": "David Mueller; Nicholas Andrews; Mark Dredze", + "authorids": "/d/david-mueller/; /n/nicholas-andrews/; /m/mark-dredze/", + "bibtex": "@inproceedings{mueller-etal-2022-text,\n title = \"Do Text-to-Text Multi-Task Learners Suffer from Task Conflict?\",\n author = \"Mueller, David and\n Andrews, Nicholas and\n Dredze, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.206/\",\n doi = \"10.18653/v1/2022.findings-emnlp.206\",\n pages = \"2843--2858\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.206.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.206/", + "pdf_size": 614107, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10306770997590189605&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff": 
"Department of Computer Science, Johns Hopkins University + Human Language Technology Center of Excellence, Johns Hopkins University; Human Language Technology Center of Excellence, Johns Hopkins University; Department of Computer Science, Johns Hopkins University", + "aff_domain": "jhu.edu;jhu.edu;cs.jhu.edu", + "email": "jhu.edu;jhu.edu;cs.jhu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.100", + "title": "Do Vision-and-Language Transformers Learn Grounded Predicate-Noun Dependencies?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent advances in vision-and-language modeling have seen the development of Transformer architectures that achieve remarkable performance on multimodal reasoning tasks.Yet, the exact capabilities of these black-box models are still poorly understood. While much of previous work has focused on studying their ability to learn meaning at the word-level, their ability to track syntactic dependencies between words has received less attention.We take a first step in closing this gap by creating a new multimodal task targeted at evaluating understanding of predicate-noun dependencies in a controlled setup.We evaluate a range of state-of-the-art models and find that their performance on the task varies considerably, with some models performing relatively well and others at chance level. In an effort to explain this variability, our analyses indicate that the quality (and not only sheer quantity) of pretraining data is essential. 
Additionally, the best performing models leverage fine-grained multimodal pretraining objectives in addition to the standard image-text matching objectives.This study highlights that targeted and controlled evaluations are a crucial step for a precise and rigorous test of the multimodal knowledge of vision-and-language models.", + "author": "Mitja Nikolaus; Emmanuelle Salin; Stephane Ayache; Abdellah Fourtassi; Benoit Favre", + "authorids": "/m/mitja-nikolaus/; /e/emmanuelle-salin/; /s/stephane-ayache/; /a/abdellah-fourtassi/; /b/benoit-favre/", + "bibtex": "@inproceedings{nikolaus-etal-2022-vision,\n title = \"Do Vision-and-Language Transformers Learn Grounded Predicate-Noun Dependencies?\",\n author = \"Nikolaus, Mitja and\n Salin, Emmanuelle and\n Ayache, Stephane and\n Fourtassi, Abdellah and\n Favre, Benoit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.100/\",\n doi = \"10.18653/v1/2022.emnlp-main.100\",\n pages = \"1538--1555\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.100.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.100/", + "pdf_size": 726425, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11864157942225204566&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Aix Marseille Univ, Universit\u00e9 de Toulon, CNRS, LIS, Marseille, France + Aix-Marseille Univ, CNRS, LPL, Aix-en-Provence, France; Aix Marseille Univ, Universit\u00e9 de Toulon, CNRS, LIS, Marseille, France; Aix Marseille Univ, Universit\u00e9 de Toulon, CNRS, LIS, Marseille, France; Aix Marseille Univ, Universit\u00e9 de Toulon, CNRS, LIS, Marseille, France; Aix Marseille Univ, Universit\u00e9 
de Toulon, CNRS, LIS, Marseille, France", + "aff_domain": "univ-amu.fr; ; ; ; ", + "email": "univ-amu.fr; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;0", + "aff_unique_norm": "Aix Marseille University;Aix-Marseille University", + "aff_unique_dep": "Laboratoire d'Informatique et Syst\u00e8mes;CNRS, LPL", + "aff_unique_url": "https://www.univ-amu.fr;https://www.univ-amu.fr", + "aff_unique_abbr": "AMU;AMU", + "aff_campus_unique_index": "0+1;0;0;0;0", + "aff_campus_unique": "Marseille;Aix-en-Provence", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.findings-emnlp.131", + "title": "Doc2Bot: Accessing Heterogeneous Documents via Conversational Bots", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper introduces Doc2Bot, a novel dataset for building machines that help users seek information via conversations. This is of particular interest for companies and organizations that own a large number of manuals or instruction books. Despite its potential, the nature of our task poses several challenges: (1) documents contain various structures that hinder the ability of machines to comprehend, and (2) user information needs are often underspecified. Compared to prior datasets that either focus on a single structural type or overlook the role of questioning to uncover user needs, the Doc2Bot dataset is developed to target such challenges systematically. Our dataset contains over 100,000 turns based on Chinese documents from five domains, larger than any prior document-grounded dialog dataset for information seeking. We propose three tasks in Doc2Bot: (1) dialog state tracking to track user intentions, (2) dialog policy learning to plan system actions and contents, and (3) response generation which generates responses based on the outputs of the dialog policy. 
Baseline methods based on the latest deep learning models are presented, indicating that our proposed tasks are challenging and worthy of further research.", + "author": "Haomin Fu; Yeqin Zhang; Haiyang Yu; Jian Sun; Fei Huang; Luo Si; Yongbin Li; Cam Tu Nguyen", + "authorids": "/h/haomin-fu/; /y/yeqin-zhang/; /h/haiyang-yu/; /j/jian-sun/; /f/fei-huang/; /l/luo-si/; /y/yongbin-li/; /c/cam-tu-nguyen/", + "bibtex": "@inproceedings{fu-etal-2022-doc2bot,\n title = \"{D}oc2{B}ot: Accessing Heterogeneous Documents via Conversational Bots\",\n author = \"Fu, Haomin and\n Zhang, Yeqin and\n Yu, Haiyang and\n Sun, Jian and\n Huang, Fei and\n Si, Luo and\n Li, Yongbin and\n Nguyen, Cam Tu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.131/\",\n doi = \"10.18653/v1/2022.findings-emnlp.131\",\n pages = \"1820--1836\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.131.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.131/", + "pdf_size": 2561723, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9377422158069616295&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China+Alibaba Group; State Key Laboratory for Novel Software Technology, Nanjing University, China+Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; State Key Laboratory for Novel Software Technology, Nanjing University, China+Alibaba Group", + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;nju.edu.cn", + "email": 
"smail.nju.edu.cn;smail.nju.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;nju.edu.cn", + "github": "https://github.com/Doc2Bot/Doc2Bot", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;1;1;1;1;1;0+1", + "aff_unique_norm": "Nanjing University;Alibaba Group", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "Nanjing U;Alibaba", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.139", + "title": "DocFin: Multimodal Financial Prediction and Bias Mitigation using Semi-structured Documents", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Financial prediction is complex due to the stochastic nature of the stock market. Semi-structured financial documents present comprehensive financial data in tabular formats, such as earnings, profit-loss statements, and balance sheets, and can often contain rich technical analysis along with a textual discussion of corporate history, and management analysis, compliance, and risks. Existing research focuses on the textual and audio modalities of financial disclosures from company conference calls to forecast stock volatility and price movement, but ignores the rich tabular data available in financial reports. Moreover, the economic realm is still plagued with a severe under-representation of various communities spanning diverse demographics, gender, and native speakers. 
In this work, we show that combining tabular data from financial semi-structured documents with text transcripts and audio recordings not only improves stock volatility and price movement prediction by 5-12% but also reduces gender bias caused due to audio-based neural networks by over 30%.", + "author": "Puneet Mathur; Mihir Goyal; Ramit Sawhney; Ritik Mathur; Jochen Leidner; Franck Dernoncourt; Dinesh Manocha", + "authorids": "/p/puneet-mathur/; /m/mihir-goyal/; /r/ramit-sawhney/; /r/ritik-mathur/; /j/jochen-l-leidner/; /f/franck-dernoncourt/; /d/dinesh-manocha/", + "bibtex": "@inproceedings{mathur-etal-2022-docfin,\n title = \"{D}oc{F}in: Multimodal Financial Prediction and Bias Mitigation using Semi-structured Documents\",\n author = \"Mathur, Puneet and\n Goyal, Mihir and\n Sawhney, Ramit and\n Mathur, Ritik and\n Leidner, Jochen and\n Dernoncourt, Franck and\n Manocha, Dinesh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.139/\",\n doi = \"10.18653/v1/2022.findings-emnlp.139\",\n pages = \"1933--1940\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.139.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.139/", + "pdf_size": 489933, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1620829310558147490&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "University of Maryland; IIIT-Delhi; Georgia Institute of Technology; IIT-Roorkee; University of Sheffield; Adobe Research; University of Maryland", + "aff_domain": "umd.edu;iiitd.ac.in;gatech.edu;me.iitr.ac.in;acm.org;adobe.com;umd.edu", + "email": "umd.edu;iiitd.ac.in;gatech.edu;me.iitr.ac.in;acm.org;adobe.com;umd.edu", + 
"github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;4;5;0", + "aff_unique_norm": "University of Maryland;Indraprastha Institute of Information Technology, Delhi;Georgia Institute of Technology;Indian Institute of Technology Roorkee;University of Sheffield;Adobe", + "aff_unique_dep": ";;;;;Adobe Research", + "aff_unique_url": "https://www.umd.edu;https://www.iiitdelhi.ac.in;https://www.gatech.edu;https://www.iitr.ac.in;https://www.sheffield.ac.uk;https://research.adobe.com", + "aff_unique_abbr": "UMD;IIIT-D;Georgia Tech;IIT Roorkee;Sheffield;Adobe", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Delhi", + "aff_country_unique_index": "0;1;0;1;2;0;0", + "aff_country_unique": "United States;India;United Kingdom" + }, + { + "id": "2022.emnlp-main.51", + "title": "DocInfer: Document-level Natural Language Inference using Optimal Evidence Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present DocInfer - a novel, end-to-end Document-level Natural Language Inference model that builds a hierarchical document graph enriched through inter-sentence relations (topical, entity-based, concept-based), performs paragraph pruning using the novel SubGraph Pooling layer, followed by optimal evidence selection based on REINFORCE algorithm to identify the most important context sentences for a given hypothesis. Our evidence selection mechanism allows it to transcend the input length limitation of modern BERT-like Transformer models while presenting the entire evidence together for inferential reasoning. We show this is an important property needed to reason on large documents where the evidence may be fragmented and located arbitrarily far from each other. Extensive experiments on popular corpora - DocNLI, ContractNLI, and ConTRoL datasets, and our new proposed dataset called CaseHoldNLI on the task of legal judicial reasoning, demonstrate significant performance gains of 8-12% over SOTA methods. 
Our ablation studies validate the impact of our model. Performance improvement of 3-6% on annotation-scarce downstream tasks of fact verification, multiple-choice QA, and contract clause retrieval demonstrates the usefulness of DocInfer beyond primary NLI tasks.", + "author": "Puneet Mathur; Gautam Kunapuli; Riyaz Bhat; Manish Shrivastava; Dinesh Manocha; Maneesh Singh", + "authorids": "/p/puneet-mathur/; /g/gautam-kunapuli/; /r/riyaz-bhat/; /m/manish-shrivastava/; /d/dinesh-manocha/; /m/maneesh-singh/", + "bibtex": "@inproceedings{mathur-etal-2022-docinfer,\n title = \"{D}oc{I}nfer: Document-level Natural Language Inference using Optimal Evidence Selection\",\n author = \"Mathur, Puneet and\n Kunapuli, Gautam and\n Bhat, Riyaz and\n Shrivastava, Manish and\n Manocha, Dinesh and\n Singh, Maneesh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.51/\",\n doi = \"10.18653/v1/2022.emnlp-main.51\",\n pages = \"809--824\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.51.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.51/", + "pdf_size": 1164535, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12816653552690593276&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 0, + "aff": "University of Maryland, College Park, MD, USA; IBM, Bengaluru, India; Motive, San Francisco, USA; IIIT, Hyderabad, India; University of Maryland, College Park, MD, USA; Motive, San Francisco, USA", + "aff_domain": "umd.edu; ; ; ; ; ", + "email": "umd.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;0;2", + "aff_unique_norm": "University of 
Maryland;IBM;Motive;International Institute of Information Technology", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.umd.edu;https://www.ibm.com;;https://www.iiit.ac.in", + "aff_unique_abbr": "UMD;IBM;;IIIT-H", + "aff_campus_unique_index": "0;1;2;3;0;2", + "aff_campus_unique": "College Park;Bengaluru;San Francisco;Hyderabad", + "aff_country_unique_index": "0;1;0;1;0;0", + "aff_country_unique": "United States;India" + }, + { + "id": "2022.emnlp-main.499", + "title": "Does Corpus Quality Really Matter for Low-Resource Languages?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The vast majority of non-English corpora are derived from automatically filtered versions of CommonCrawl. While prior work has identified major issues on the quality of these datasets (Kreutzer et al., 2021), it is not clear how this impacts downstream performance. Taking representation learning in Basque as a case study, we explore tailored crawling (manually identifying and scraping websites with high-quality content) as an alternative to filtering CommonCrawl. Our new corpus, called EusCrawl, is similar in size to the Basque portion of popular multilingual corpora like CC100 and mC4, yet it has a much higher quality according to native annotators. For instance, 66% of documents are rated as high-quality for EusCrawl, in contrast with <33% for both mC4 and CC100. Nevertheless, we obtain similar results on downstream NLU tasks regardless of the corpus used for pre-training. 
Our work suggests that NLU performance in low-resource languages is not primarily constrained by the quality of the data, and other factors like corpus size and domain coverage can play a more important role.", + "author": "Mikel Artetxe; Itziar Aldabe; Rodrigo Agerri; Olatz Perez-de-Vi\u00f1aspre; Aitor Soroa", + "authorids": "/m/mikel-artetxe/; /i/itziar-aldabe/; /r/rodrigo-agerri/; /o/olatz-perez-de-vinaspre/; /a/aitor-soroa/", + "bibtex": "@inproceedings{artetxe-etal-2022-corpus,\n title = \"Does Corpus Quality Really Matter for Low-Resource Languages?\",\n author = \"Artetxe, Mikel and\n Aldabe, Itziar and\n Agerri, Rodrigo and\n Perez-de-Vi{\\~n}aspre, Olatz and\n Soroa, Aitor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.499/\",\n doi = \"10.18653/v1/2022.emnlp-main.499\",\n pages = \"7383--7390\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.499.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.499/", + "pdf_size": 204751, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5168118170049125359&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Meta AI; HiTZ Center, University of the Basque Country (UPV/EHU); HiTZ Center, University of the Basque Country (UPV/EHU); HiTZ Center, University of the Basque Country (UPV/EHU); HiTZ Center, University of the Basque Country (UPV/EHU)", + "aff_domain": "meta.com;ehu.eus;ehu.eus;ehu.eus;ehu.eus", + "email": "meta.com;ehu.eus;ehu.eus;ehu.eus;ehu.eus", + "github": "", + "project": "https://www.ixa.eus/euscrawl/", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Meta Platforms, Inc.;University of the 
Basque Country", + "aff_unique_dep": "Meta AI;HiTZ Center", + "aff_unique_url": "https://meta.com;https://www.ehu.eus/en", + "aff_unique_abbr": "Meta;UPV/EHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "United States;Spain" + }, + { + "id": "2022.emnlp-main.297", + "title": "Does Joint Training Really Help Cascaded Speech Translation?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Currently, in speech translation, the straightforward approach - cascading a recognition system with a translation system - delivers state-of-the-art results.However, fundamental challenges such as error propagation from the automatic speech recognition system still remain.To mitigate these problems, recently, people turn their attention to direct data and propose various joint training methods.In this work, we seek to answer the question of whether joint training really helps cascaded speech translation.We review recent papers on the topic and also investigate a joint training criterion by marginalizing the transcription posterior probabilities.Our findings show that a strong cascaded baseline can diminish any improvements obtained using joint training, and we suggest alternatives to joint training.We hope this work can serve as a refresher of the current speech translation landscape, and motivate research in finding more efficient and creative ways to utilize the direct data for speech translation.", + "author": "Viet Anh Khoa Tran; David Thulke; Yingbo Gao; Christian Herold; Hermann Ney", + "authorids": "/v/viet-anh-khoa-tran/; /d/david-thulke/; /y/yingbo-gao/; /c/christian-herold/; /h/hermann-ney/", + "bibtex": "@inproceedings{tran-etal-2022-joint,\n title = \"Does Joint Training Really Help Cascaded Speech Translation?\",\n author = \"Tran, Viet Anh Khoa and\n Thulke, David and\n Gao, Yingbo and\n Herold, Christian and\n Ney, Hermann\",\n editor = \"Goldberg, Yoav and\n 
Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.297/\",\n doi = \"10.18653/v1/2022.emnlp-main.297\",\n pages = \"4480--4487\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.297.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.297/", + "pdf_size": 230696, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15626101149857033255&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Human Language Technology and Pattern Recognition Group, Computer Science Department, RWTH Aachen University; Human Language Technology and Pattern Recognition Group, Computer Science Department, RWTH Aachen University; Human Language Technology and Pattern Recognition Group, Computer Science Department, RWTH Aachen University; Human Language Technology and Pattern Recognition Group, Computer Science Department, RWTH Aachen University; Human Language Technology and Pattern Recognition Group, Computer Science Department, RWTH Aachen University", + "aff_domain": "i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de", + "email": "i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de;i6.informatik.rwth-aachen.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "RWTH Aachen University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.rwth-aachen.de", + "aff_unique_abbr": "RWTH", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Aachen", + "aff_country_unique_index": "0;0;0;0;0", + 
"aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.501", + "title": "Does Self-Rationalization Improve Robustness to Spurious Correlations?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Rationalization is fundamental to human reasoning and learning. NLP models trained to produce rationales along with predictions, called self-rationalization models, have been investigated for their interpretability and utility to end-users. However, the extent to which training with human-written rationales facilitates learning remains an under-explored question. We ask whether training models to self-rationalize can aid in their learning to solve tasks for the right reasons. Specifically, we evaluate how training self-rationalization models with free-text rationales affects robustness to spurious correlations in fine-tuned encoder-decoder and decoder-only models of six different sizes. We evaluate robustness to spurious correlations by measuring performance on 1) manually annotated challenge datasets and 2) subsets of original test sets where reliance on spurious correlations would fail to produce correct answers. We find that while self-rationalization can improve robustness to spurious correlations in low-resource settings, it tends to hurt robustness in higher-resource settings. Furthermore, these effects depend on model family and size, as well as on rationale content. 
Together, our results suggest that explainability can come at the cost of robustness; thus, appropriate care should be taken when training self-rationalizing models with the goal of creating more trustworthy models.", + "author": "Alexis Ross; Matthew Peters; Ana Marasovic", + "authorids": "/a/alexis-ross/; /m/matthew-e-peters/; /a/ana-marasovic/", + "bibtex": "@inproceedings{ross-etal-2022-self,\n title = \"Does Self-Rationalization Improve Robustness to Spurious Correlations?\",\n author = \"Ross, Alexis and\n Peters, Matthew and\n Marasovic, Ana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.501/\",\n doi = \"10.18653/v1/2022.emnlp-main.501\",\n pages = \"7403--7416\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.501.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.501/", + "pdf_size": 279648, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18302249403671671037&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Massachusetts Institute of Technology, Cambridge, MA, USA; Allen Institute for AI, Seattle, WA, USA; University of Utah, Salt Lake City, UT, USA", + "aff_domain": "mit.edu;allenai.org;utah.edu", + "email": "mit.edu;allenai.org;utah.edu", + "github": "https://github.com/allenai/rationale_robustness", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Massachusetts Institute of Technology;Allen Institute for AI;University of Utah", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.mit.edu;https://allenai.org;https://www.utah.edu", + "aff_unique_abbr": "MIT;AI2;U of U", + "aff_campus_unique_index": "0;1;2", + 
"aff_campus_unique": "Cambridge;Seattle;Salt Lake City", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.11", + "title": "Does Simultaneous Speech Translation need Simultaneous Models?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In simultaneous speech translation (SimulST), finding the best trade-off between high output quality and low latency is a challenging task. To meet the latency constraints posed by different application scenarios, multiple dedicated SimulST models are usually trained and maintained, generating high computational costs. In this paper, also motivated by the increased sensitivity towards sustainable AI, we investigate whether a single model trained offline can serve both offline and simultaneous applications under different latency regimes without additional training or adaptation. Experiments on en->de, es show that, aside from facilitating the adoption of well-established offline architectures and training strategies without affecting latency, offline training achieves similar or better quality compared to the standard SimulST training protocol, also being competitive with the state-of-the-art system.", + "author": "Sara Papi; Marco Gaido; Matteo Negri; Marco Turchi", + "authorids": "/s/sara-papi/; /m/marco-gaido/; /m/matteo-negri/; /m/marco-turchi/", + "bibtex": "@inproceedings{papi-etal-2022-simultaneous,\n title = \"Does Simultaneous Speech Translation need Simultaneous Models?\",\n author = \"Papi, Sara and\n Gaido, Marco and\n Negri, Matteo and\n Turchi, Marco\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.11/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.11\",\n pages = \"141--153\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.11.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.11/", + "pdf_size": 468355, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11153357263233051457&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "Fondazione Bruno Kessler+University of Trento; Fondazione Bruno Kessler+University of Trento; Fondazione Bruno Kessler; Zoom Video Communications", + "aff_domain": "fbk.eu;fbk.eu;fbk.eu;zoom.us", + "email": "fbk.eu;fbk.eu;fbk.eu;zoom.us", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;2", + "aff_unique_norm": "Fondazione Bruno Kessler;University of Trento;Zoom Video Communications", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.fbk.eu;https://www.unitn.it;https://zoom.us", + "aff_unique_abbr": "FBK;UniTN;Zoom", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;1", + "aff_country_unique": "Italy;United States" + }, + { + "id": "2022.emnlp-main.592", + "title": "Does Your Model Classify Entities Reasonably? Diagnosing and Mitigating Spurious Correlations in Entity Typing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entity typing aims at predicting one or more words that describe the type(s) of a specific mention in a sentence. Due to shortcuts from surface patterns to annotated entity labels and biased training, existing entity typing models are subject to the problem of spurious correlations. To comprehensively investigate the faithfulness and reliability of entity typing methods, we first systematically define distinct kinds of model biases that are reflected mainly from spurious correlations. 
Particularly, we identify six types of existing model biases, including mention-context bias, lexical overlapping bias, named entity bias, pronoun bias, dependency bias, and overgeneralization bias. To mitigate model biases, we then introduce a counterfactual data augmentation method. By augmenting the original training set with their debiasedcounterparts, models are forced to fully comprehend sentences and discover the fundamental cues for entity typing, rather than relying on spurious correlations for shortcuts. Experimental results on the UFET dataset show our counterfactual data augmentation approach helps improve generalization of different entity typing models with consistently better performance on both the original and debiased test sets.", + "author": "Nan Xu; Fei Wang; Bangzheng Li; Mingtao Dong; Muhao Chen", + "authorids": "/n/nan-xu/; /f/fei-wang/; /b/bangzheng-li/; /m/mingtao-dong/; /m/muhao-chen/", + "bibtex": "@inproceedings{xu-etal-2022-model,\n title = \"Does Your Model Classify Entities Reasonably? 
Diagnosing and Mitigating Spurious Correlations in Entity Typing\",\n author = \"Xu, Nan and\n Wang, Fei and\n Li, Bangzheng and\n Dong, Mingtao and\n Chen, Muhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.592/\",\n doi = \"10.18653/v1/2022.emnlp-main.592\",\n pages = \"8642--8658\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.592.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.592/", + "pdf_size": 657910, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2201099331383454840&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-industry.62", + "title": "Domain Adaptation of Machine Translation with Crowdworkers", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Although a machine translation model trained with a large in-domain parallel corpus achieves remarkable results, it still works poorly when no in-domain data are available. This situation restricts the applicability of machine translation when the target domain\u2019s data are limited. However, there is great demand for high-quality domain-specific machine translation models for many domains. We propose a framework that efficiently and effectively collects parallel sentences in a target domain from the web with the help of crowdworkers.With the collected parallel data, we can quickly adapt a machine translation model to the target domain. Our experiments show that the proposed method can collect target-domain parallel data over a few days at a reasonable cost. 
We tested it with five domains, and the domain-adapted model improved the BLEU scores to +19.7 by an average of +7.8 points compared to a general-purpose translation model.", + "author": "Makoto Morishita; Jun Suzuki; Masaaki Nagata", + "authorids": "/m/makoto-morishita/; /j/jun-suzuki/; /m/masaaki-nagata/", + "bibtex": "@inproceedings{morishita-etal-2022-domain,\n title = \"Domain Adaptation of Machine Translation with Crowdworkers\",\n author = \"Morishita, Makoto and\n Suzuki, Jun and\n Nagata, Masaaki\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.62/\",\n doi = \"10.18653/v1/2022.emnlp-industry.62\",\n pages = \"606--618\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.62.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.62/", + "pdf_size": 840770, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10059031046160293825&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "NTT Communication Science Laboratories, NTT Corporation1; Tohoku University2; NTT Communication Science Laboratories, NTT Corporation1", + "aff_domain": "hco.ntt.co.jp;tohoku.ac.jp;hco.ntt.co.jp", + "email": "hco.ntt.co.jp;tohoku.ac.jp;hco.ntt.co.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "NTT Communication Science Laboratories;Tohoku University", + "aff_unique_dep": "Communication Science;", + "aff_unique_url": "https://www.ntt-csl.com;https://www.tohoku.ac.jp", + "aff_unique_abbr": "NTT CSL;Tohoku U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": 
"2022.emnlp-main.635", + "title": "Don\u2019t Copy the Teacher: Data and Model Challenges in Embodied Dialogue", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Embodied dialogue instruction following requires an agent to complete a complex sequence of tasks from a natural language exchange. The recent introduction of benchmarks raises the question of how best to train and evaluate models for this multi-turn, multi-agent, long-horizon task. This paper contributes to that conversation, by arguing that imitation learning (IL) and related low-level metrics are actually misleading and do not align with the goals of embodied dialogue research and may hinder progress.We provide empirical comparisons of metrics, analysis of three models, and make suggestions for how the field might best progress. First, we observe that models trained with IL take spurious actions during evaluation. Second, we find that existing models fail to ground query utterances, which are essential for task completion. Third, we argue evaluation should focus on higher-level semantic goals. 
We will release code to additionally filter the data and benchmark models for improved evaluation.", + "author": "So Yeon Min; Hao Zhu; Ruslan Salakhutdinov; Yonatan Bisk", + "authorids": "/s/so-yeon-min/; /h/hao-zhu/; /r/ruslan-salakhutdinov/; /y/yonatan-bisk/", + "bibtex": "@inproceedings{min-etal-2022-dont,\n title = \"Don`t Copy the Teacher: Data and Model Challenges in Embodied Dialogue\",\n author = \"Min, So Yeon and\n Zhu, Hao and\n Salakhutdinov, Ruslan and\n Bisk, Yonatan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.635/\",\n doi = \"10.18653/v1/2022.emnlp-main.635\",\n pages = \"9361--9368\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.635.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.635/", + "pdf_size": 1870709, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5460181767402218924&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Machine Learning; Language Technologies; Machine Learning; Language Technologies", + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "https://github.com/soyeonm/TEACh_FILM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Machine Learning;Language Technologies", + "aff_unique_dep": ";", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.findings-emnlp.372", + "title": "Don\u2019t Just Clean It, Proxy Clean It: Mitigating Bias by 
Proxy in Pre-Trained Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer-based pre-trained models are known to encode societal biases not only in their contextual representations, but also in downstream predictions when fine-tuned on task-specific data.We present D-Bias, an approach that selectively eliminates stereotypical associations (e.g, co-occurrence statistics) at fine-tuning, such that the model doesn\u2019t learn to excessively rely on those signals.D-Bias attenuates biases from both identity words and frequently co-occurring proxies, which we select using pointwise mutual information.We apply D-Bias to a) occupation classification, and b) toxicity classification and find that our approach substantially reduces downstream biases (e.g. by > 60% in toxicity classification, for identities that are most frequently flagged as toxic on online platforms).In addition, we show that D-Bias dramatically improves upon scrubbing, i.e., removing only the identity words in question.We also demonstrate that D-Bias easily extends to multiple identities, and achieves competitive performance with two recently proposed debiasing approaches: R-LACE and INLP.", + "author": "Swetasudha Panda; Ari Kobren; Michael Wick; Qinlan Shen", + "authorids": "/s/swetasudha-panda/; /a/ari-kobren/; /m/michael-wick/; /q/qinlan-shen/", + "bibtex": "@inproceedings{panda-etal-2022-dont,\n title = \"Don`t Just Clean It, Proxy Clean It: Mitigating Bias by Proxy in Pre-Trained Models\",\n author = \"Panda, Swetasudha and\n Kobren, Ari and\n Wick, Michael and\n Shen, Qinlan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.372/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.372\",\n pages = \"5073--5085\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.372.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.372/", + "pdf_size": 710461, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16580319961995645365&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.509", + "title": "Don\u2019t Prompt, Search! Mining-based Zero-Shot Learning with Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Masked language models like BERT can perform text classification in a zero-shot fashion by reformulating downstream tasks as text infilling. However, this approach is highly sensitive to the template used to prompt the model, yet practitioners are blind when designing them in strict zero-shot settings. In this paper, we propose an alternative mining-based approach for zero-shot learning. Instead of prompting language models, we use regular expressions to mine labeled examples from unlabeled corpora, which can optionally be filtered through prompting, and used to finetune a pretrained model. Our method is more flexible and interpretable than prompting, and outperforms it on a wide range of tasks when using comparable templates. Our results suggest that the success of prompting can partly be explained by the model being exposed to similar examples during pretraining, which can be directly retrieved through regular expressions.", + "author": "Mozes van de Kar; Mengzhou Xia; Danqi Chen; Mikel Artetxe", + "authorids": "/m/mozes-van-de-kar/; /m/mengzhou-xia/; /d/danqi-chen/; /m/mikel-artetxe/", + "bibtex": "@inproceedings{van-de-kar-etal-2022-dont,\n title = \"Don`t Prompt, Search! 
Mining-based Zero-Shot Learning with Language Models\",\n author = \"van de Kar, Mozes and\n Xia, Mengzhou and\n Chen, Danqi and\n Artetxe, Mikel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.509/\",\n doi = \"10.18653/v1/2022.emnlp-main.509\",\n pages = \"7508--7520\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.509.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.509/", + "pdf_size": 4074482, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13107884815848897138&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "University of Amsterdam; Princeton University; Princeton University; Meta AI", + "aff_domain": "gmail.com;cs.princeton.edu;cs.princeton.edu;meta.com", + "email": "gmail.com;cs.princeton.edu;cs.princeton.edu;meta.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "University of Amsterdam;Princeton University;Meta Platforms, Inc.", + "aff_unique_dep": ";;Meta AI", + "aff_unique_url": "https://www.uva.nl;https://www.princeton.edu;https://meta.com", + "aff_unique_abbr": "UvA;Princeton;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "Netherlands;United States" + }, + { + "id": "2022.emnlp-main.736", + "title": "Don\u2019t Stop Fine-Tuning: On Training Regimes for Few-Shot Cross-Lingual Transfer with Multilingual Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A large body of recent work highlights the fallacies of zero-shot cross-lingual transfer (ZS-XLT) with large multilingual 
language models. Namely, their performance varies substantially for different target languages and is the weakest where needed the most: for low-resource languages distant to the source language. One remedy is few-shot transfer (FS-XLT), where leveraging only a few task-annotated instances in the target language(s) may yield sizable performance gains. However, FS-XLT also succumbs to large variation, as models easily overfit to the small datasets. In this work, we present a systematic study focused on a spectrum of FS-XLT fine-tuning regimes, analyzing key properties such as effectiveness, (in)stability, and modularity. We conduct extensive experiments on both higher-level (NLI, paraphrasing) and lower-level tasks (NER, POS), presenting new FS-XLT strategies that yield both improved and more stable FS-XLT across the board. Our findings challenge established FS-XLT methods: e.g., we propose to replace sequential fine-tuning with joint fine-tuning on source and target language instances, offering consistent gains with different number of shots (including resource-rich scenarios). 
We also show that further gains can be achieved with multi-stage FS-XLT training in which joint multilingual fine-tuning precedes the bilingual source-target specialization.", + "author": "Fabian David Schmidt; Ivan Vuli\u0107; Goran Glava\u0161", + "authorids": "/f/fabian-david-schmidt/; /i/ivan-vulic/; /g/goran-glavas/", + "bibtex": "@inproceedings{schmidt-etal-2022-dont,\n title = \"Don`t Stop Fine-Tuning: On Training Regimes for Few-Shot Cross-Lingual Transfer with Multilingual Language Models\",\n author = \"Schmidt, Fabian David and\n Vuli{\\'c}, Ivan and\n Glava{\\v{s}}, Goran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.736/\",\n doi = \"10.18653/v1/2022.emnlp-main.736\",\n pages = \"10725--10742\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.736.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.736/", + "pdf_size": 1510816, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11465106311225223895&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 0, + "aff": "Center For Artificial Intelligence and Data Science, University of W\u00fcrzburg, Germany; Language Technology Lab, University of Cambridge, UK+PolyAI Ltd., UK; Center For Artificial Intelligence and Data Science, University of W\u00fcrzburg, Germany", + "aff_domain": "uni-wuerzburg.de;cam.ac.uk;uni-wuerzburg.de", + "email": "uni-wuerzburg.de;cam.ac.uk;uni-wuerzburg.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "University of W\u00fcrzburg;University of Cambridge;PolyAI Ltd.", + "aff_unique_dep": "Center For Artificial Intelligence and Data Science;Language 
Technology Lab;", + "aff_unique_url": "https://www.uni-wuerzburg.de;https://www.cam.ac.uk;https://www.poly.ai", + "aff_unique_abbr": ";Cambridge;PolyAI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1+1;0", + "aff_country_unique": "Germany;United Kingdom" + }, + { + "id": "2022.emnlp-main.57", + "title": "DropMix: A Textual Data Augmentation Combining Dropout with Mixup", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Overfitting is a notorious problem when there is insufficient data to train deep neural networks in machine learning tasks. Data augmentation regularization methods such as Dropout, Mixup, and their enhanced variants are effective and prevalent, and achieve promising performance to overcome overfitting. However, in text learning, most of the existing regularization approaches merely adopt ideas from computer vision without considering the importance of dimensionality in natural language processing. In this paper, we argue that the property is essential to overcome overfitting in text learning. Accordingly, we present a saliency map informed textual data augmentation and regularization framework, which combines Dropout and Mixup, namely DropMix, to mitigate the overfitting problem in text learning. In addition, we design a procedure that drops and patches fine grained shapes of the saliency map under the DropMix framework to enhance regularization. 
Empirical studies confirm the effectiveness of the proposed approach on 12 text classification tasks.", + "author": "Fanshuang Kong; Richong Zhang; Xiaohui Guo; Samuel Mensah; Yongyi Mao", + "authorids": "/f/fanshuang-kong/; /r/richong-zhang/; /x/xiaohui-guo/; /s/samuel-mensah/; /y/yongyi-mao/", + "bibtex": "@inproceedings{kong-etal-2022-dropmix,\n title = \"{D}rop{M}ix: A Textual Data Augmentation Combining Dropout with Mixup\",\n author = \"Kong, Fanshuang and\n Zhang, Richong and\n Guo, Xiaohui and\n Mensah, Samuel and\n Mao, Yongyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.57/\",\n doi = \"10.18653/v1/2022.emnlp-main.57\",\n pages = \"890--899\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.57.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.57/", + "pdf_size": 2456344, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3710811898473837892&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "SKLSDE, Beihang University, Beijing, China+Zhongguancun Laboratory, Beijing, China; SKLSDE, Beihang University, Beijing, China+Zhongguancun Laboratory, Beijing, China; Hangzhou Innovation Institute, Beihang University, Hangzhou, China; Department of Computer Science, University of Sheffield, UK; School of Electrical Engineering and Computer Science, University of Ottawa, Canada", + "aff_domain": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;sheffield.ac.uk;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;sheffield.ac.uk;uottawa.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0;2;3", + "aff_unique_norm": "Beihang 
University;Zhongguancun Laboratory;University of Sheffield;University of Ottawa", + "aff_unique_dep": "SKLSDE;;Department of Computer Science;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.sheffield.ac.uk;https://www.uottawa.ca", + "aff_unique_abbr": ";;Sheffield;U Ottawa", + "aff_campus_unique_index": "0;0;2;3", + "aff_campus_unique": "Beijing;;Hangzhou;Ottawa", + "aff_country_unique_index": "0+0;0+0;0;1;2", + "aff_country_unique": "China;United Kingdom;Canada" + }, + { + "id": "2022.emnlp-main.531", + "title": "DuQM: A Chinese Dataset of Linguistically Perturbed Natural Questions for Evaluating the Robustness of Question Matching Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we focus on the robustness evaluation of Chinese Question Matching (QM) models. Most of the previous work on analyzing robustness issues focus on just one or a few types of artificial adversarial examples. Instead, we argue that a comprehensive evaluation should be conducted on natural texts, which takes into account the fine-grained linguistic capabilities of QM models. For this purpose, we create a Chinese dataset namely DuQM which contains natural questions with linguistic perturbations to evaluate the robustness of QM models. DuQM contains 3 categories and 13 subcategories with 32 linguistic perturbations. The extensive experiments demonstrate that DuQM has a better ability to distinguish different models. Importantly, the detailed breakdown of evaluation by the linguistic phenomena in DuQM helps us easily diagnose the strength and weakness of different models. Additionally, our experiment results show that the effect of artificial adversarial examples does not work on natural texts. 
Our baseline codes and a leaderboard are now publicly available.", + "author": "Hongyu Zhu; Yan Chen; Jing Yan; Jing Liu; Yu Hong; Ying Chen; Hua Wu; Haifeng Wang", + "authorids": "/h/hongyu-zhu/; /y/yan-chen/; /j/jing-yan/; /j/jing-liu/; /y/yu-hong/; /y/ying-chen/; /h/hua-wu/; /h/haifeng-wang/", + "bibtex": "@inproceedings{zhu-etal-2022-duqm,\n title = \"{D}u{QM}: A {C}hinese Dataset of Linguistically Perturbed Natural Questions for Evaluating the Robustness of Question Matching Models\",\n author = \"Zhu, Hongyu and\n Chen, Yan and\n Yan, Jing and\n Liu, Jing and\n Hong, Yu and\n Chen, Ying and\n Wu, Hua and\n Wang, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.531/\",\n doi = \"10.18653/v1/2022.emnlp-main.531\",\n pages = \"7782--7794\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.531.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.531/", + "pdf_size": 671220, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9572666077275467276&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science and Technology, Soochow University, China\u2660\u2663\u2020; Baidu Inc., Beijing, China\u2663\u2020; Baidu Inc., Beijing, China\u2663\u2020; Baidu Inc., Beijing, China\u2663\u2217; School of Computer Science and Technology, Soochow University, China\u2660\u2217; Baidu Inc., Beijing, China\u2663; Baidu Inc., Beijing, China\u2663; Baidu Inc., Beijing, China\u2663", + "aff_domain": "gmail.com;gmail.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "email": "gmail.com;gmail.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + 
"github": "https://github.com/baidu/DuReader/tree/master/DuQM", + "project": "https://aistudio.baidu.com/aistudio/competition/detail/116/0/introduction", + "author_num": 8, + "aff_unique_index": "0;1;1;1;0;1;1;1", + "aff_unique_norm": "Soochow University;Baidu Inc.", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "https://eng.suda.edu.cn/;https://www.baidu.com", + "aff_unique_abbr": ";Baidu", + "aff_campus_unique_index": "1;1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.357", + "title": "DuReader-Retrieval: A Large-scale Chinese Benchmark for Passage Retrieval from Web Search Engine", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we present DuReader-retrieval, a large-scale Chinese dataset for passage retrieval. DuReader-retrieval contains more than 90K queries and over 8M unique passages from a commercial search engine. To alleviate the shortcomings of other datasets and ensure the quality of our benchmark, we (1) reduce the false negatives in development and test sets by manually annotating results pooled from multiple retrievers, and (2) remove the training queries that are semantically similar to the development and testing queries. Additionally, we provide two out-of-domain testing sets for cross-domain evaluation, as well as a set of human translated queries for for cross-lingual retrieval evaluation. The experiments demonstrate that DuReader-retrieval is challenging and a number of problems remain unsolved, such as the salient phrase mismatch and the syntactic mismatch between queries and paragraphs. These experiments also show that dense retrievers do not generalize well across domains, and cross-lingual retrieval is essentially challenging. 
DuReader-retrieval is publicly available at https://github.com/baidu/DuReader/tree/master/DuReader-Retrieval.", + "author": "Yifu Qiu; Hongyu Li; Yingqi Qu; Ying Chen; QiaoQiao She; Jing Liu; Hua Wu; Haifeng Wang", + "authorids": "/y/yifu-qiu/; /h/hongyu-li/; /y/yingqi-qu/; /y/ying-chen/; /q/qiaoqiao-she/; /j/jing-liu/; /h/hua-wu/; /h/haifeng-wang/", + "bibtex": "@inproceedings{qiu-etal-2022-dureader,\n title = \"{D}u{R}eader-Retrieval: A Large-scale {C}hinese Benchmark for Passage Retrieval from Web Search Engine\",\n author = \"Qiu, Yifu and\n Li, Hongyu and\n Qu, Yingqi and\n Chen, Ying and\n She, QiaoQiao and\n Liu, Jing and\n Wu, Hua and\n Wang, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.357/\",\n doi = \"10.18653/v1/2022.emnlp-main.357\",\n pages = \"5326--5338\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.357.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.357/", + "pdf_size": 687160, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6900506585577074279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Institute for Language, Cognition and Computation, University of Edinburgh, UK+Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China", + "aff_domain": "sms.ed.ac.uk;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "email": "sms.ed.ac.uk;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "github": 
"https://github.com/baidu/DuReader/tree/master/DuReader-Retrieval", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;1;1;1;1;1;1", + "aff_unique_norm": "University of Edinburgh;Baidu Inc.", + "aff_unique_dep": "Institute for Language, Cognition and Computation;", + "aff_unique_url": "https://www.ed.ac.uk;https://www.baidu.com", + "aff_unique_abbr": "Edinburgh;Baidu", + "aff_campus_unique_index": "1;1;1;1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+1;1;1;1;1;1;1;1", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "2022.findings-emnlp.132", + "title": "DualNER: A Dual-Teaching framework for Zero-shot Cross-lingual Named Entity Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present DualNER, a simple and effective framework to make full use of both annotated source language corpus and unlabeled target language text for zero-shot cross-lingual named entity recognition (NER). In particular, we combine two complementary learning paradigms of NER, i.e., sequence labeling and span prediction, into a unified multi-task framework. After obtaining a sufficient NER model trained on the source data, we further train it on the target data in a dual-teaching manner, in which the pseudo-labels for one task are constructed from the prediction of the other task. Moreover, based on the span prediction, an entity-aware regularization is proposed to enhance the intrinsic cross-lingual alignment between the same entities in different languages. 
Experiments and analysis demonstrate the effectiveness of our DualNER.", + "author": "Jiali Zeng; Yufan Jiang; Yongjing Yin; Xu Wang; Binghuai Lin; Yunbo Cao", + "authorids": "/j/jiali-zeng/; /y/yufan-jiang/; /y/yongjing-yin/; /x/xu-wang/; /b/binghuai-lin/; /y/yunbo-cao/", + "bibtex": "@inproceedings{zeng-etal-2022-dualner,\n title = \"{D}ual{NER}: A Dual-Teaching framework for Zero-shot Cross-lingual Named Entity Recognition\",\n author = \"Zeng, Jiali and\n Jiang, Yufan and\n Yin, Yongjing and\n Wang, Xu and\n Lin, Binghuai and\n Cao, Yunbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.132/\",\n doi = \"10.18653/v1/2022.findings-emnlp.132\",\n pages = \"1837--1843\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.132.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.132/", + "pdf_size": 765384, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6452146897738074183&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China; Zhejiang University, Westlake University, Zhejiang, China; Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China; Tencent Cloud Xiaowei, Beijing, China", + "aff_domain": "tencent.com;tencent.com;westlake.edu.cn;tencent.com;tencent.com;tencent.com", + "email": "tencent.com;tencent.com;westlake.edu.cn;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/lemon0830/dualNER", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Tencent Cloud Xiaowei;Zhejiang University", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://cloud.tencent.com;http://www.zju.edu.cn", + "aff_unique_abbr": "Tencent Cloud;ZJU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.637", + "title": "Dungeons and Dragons as a Dialog Challenge for Artificial Intelligence", + "track": "main", + "status": "Main", + "award": false, + "abstract": "AI researchers have posited Dungeons and Dragons (D&D) as a challenge problem to test systems on various language-related capabilities. In this paper, we frame D&D specifically as a dialogue system challenge, where the tasks are to both generate the next conversational turn in the game and predict the state of the game given the dialogue history. We create a gameplay dataset consisting of nearly 900 games, with a total of 7,000 players, 800,000 dialogue turns, 500,000 dice rolls, and 58 million words. We automatically annotate the data with partial state information about the game play. We train a large language model (LM) to generate the next game turn, conditioning it on different information. The LM can respond as a particular character or as the player who runs the game\u2014i.e., the Dungeon Master (DM). It is trained to produce dialogue that is either in-character (roleplaying in the fictional world) or out-of-character (discussing rules or strategy). We perform a human evaluation to determine what factors make the generated output plausible and interesting. We further perform an automatic evaluation to determine how well the model can predict the game state given the history and examine how well tracking the game state improves its ability to produce plausible conversational output.", + "author": "Chris Callison-Burch; Gaurav Singh Tomar; Lara J. 
Martin; Daphne Ippolito; Suma Bailis; David Reitter", + "authorids": "/c/chris-callison-burch/; /g/gaurav-singh-tomar/; /l/lara-j-martin/; /d/daphne-ippolito/; /s/suma-bailis/; /d/david-reitter/", + "bibtex": "@inproceedings{callison-burch-etal-2022-dungeons,\n title = \"Dungeons and Dragons as a Dialog Challenge for Artificial Intelligence\",\n author = \"Callison-Burch, Chris and\n Tomar, Gaurav Singh and\n Martin, Lara J. and\n Ippolito, Daphne and\n Bailis, Suma and\n Reitter, David\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.637/\",\n doi = \"10.18653/v1/2022.emnlp-main.637\",\n pages = \"9379--9393\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.637.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.637/", + "pdf_size": 764192, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2433487514636631107&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 13, + "aff": "University of Pennsylvania; Google Research; University of Pennsylvania; University of Pennsylvania + Google Research; Google Research; Google Research", + "aff_domain": "upenn.edu;google.com; ;upenn.edu;google.com;google.com", + "email": "upenn.edu;google.com; ;upenn.edu;google.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0+1;1;1", + "aff_unique_norm": "University of Pennsylvania;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.upenn.edu;https://research.google", + "aff_unique_abbr": "UPenn;Google Research", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0+0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.2", + "title": "DynaMaR: Dynamic Prompt with Mask Token Representation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Recent research has shown that large language models pretrained using unsupervised approaches can achieve significant performance improvement on many downstream tasks. Typically when adapting these language models to downstream tasks, like a classification or regression task, we employ a fine-tuning paradigm in which the sentence representation from the language model is input to a task-specific head; the model is then fine-tuned end-to-end. However, with the emergence of models like GPT-3, prompt-based fine-tuning has been proven to be a successful approach for few-shot tasks. Inspired by this work, we study discrete prompt technologies in practice. There are two issues that arise with the standard prompt approach. First, it can overfit on the prompt template. Second, it requires manual effort to formulate the downstream task as a language model problem. In this paper, we propose an improvement to prompt-based fine-tuning that addresses these two issues. We refer to our approach as DynaMaR \u2013 Dynamic Prompt with Mask Token Representation. 
Results show that DynaMaR can achieve an average improvement of 10% in few-shot settings and improvement of 3.7% in data-rich settings over the standard fine-tuning approach on four e-commerce applications.", + "author": "Xiaodi Sun; Sunny Rajagopalan; Priyanka Nigam; Weiyi Lu; Yi Xu; Iman Keivanloo; Belinda Zeng; Trishul Chilimbi", + "authorids": "/x/xiaodi-sun/; /s/sunny-rajagopalan/; /p/priyanka-nigam/; /w/weiyi-liu/; /y/yi-xu/; /i/iman-keivanloo/; /b/belinda-zeng/; /t/trishul-chilimbi/", + "bibtex": "@inproceedings{sun-etal-2022-dynamar,\n title = \"{D}yna{M}a{R}: Dynamic Prompt with Mask Token Representation\",\n author = \"Sun, Xiaodi and\n Rajagopalan, Sunny and\n Nigam, Priyanka and\n Lu, Weiyi and\n Xu, Yi and\n Keivanloo, Iman and\n Zeng, Belinda and\n Chilimbi, Trishul\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.2/\",\n doi = \"10.18653/v1/2022.emnlp-industry.2\",\n pages = \"9--17\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.2.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.2/", + "pdf_size": 551705, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5197737145429620668&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.356", + "title": "Dynamic Augmentation Data Selection for Few-shot Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Data augmentation has been a popular method for fine-tuning pre-trained language models to increase model robustness and performance. 
With augmentation data coming from modifying gold train data (in-sample augmentation) or being harvested from general domain unlabeled data (out-of-sample augmentation), the quality of such data is the key to successful fine-tuning. In this paper, we propose a dynamic data selection method to select effective augmentation data from different augmentation sources according to the model\u2019s learning stage, by identifying a set of augmentation samples that optimally facilitates the learning process of the most current model. The method firstly filters out augmentation samples with noisy pseudo labels through a curriculum learning strategy, then estimates the effectiveness of reserved augmentation data by its influence scores on the current model at every update, allowing the data selection process tightly tailored to model parameters. And the two-stage augmentation strategy considers in-sample augmentation and out-of-sample augmentation in different learning stages. Experiments with both kinds of augmentation data on a variety of sentence classification tasks show that our method outperforms strong baselines, proving the effectiveness of our method. 
Analysis confirms the dynamic nature of the data effectiveness and the importance of model learning stages in utilization of augmentation data.", + "author": "Guangliang Liu; Lifeng Jin; Owen Yuan; Jiayu Zhou", + "authorids": "/g/guangliang-liu/; /l/lifeng-jin/; /o/owen-yuan/; /j/jiayu-zhou/", + "bibtex": "@inproceedings{liu-etal-2022-dynamic-augmentation,\n title = \"Dynamic Augmentation Data Selection for Few-shot Text Classification\",\n author = \"Liu, Guangliang and\n Jin, Lifeng and\n Yuan, Owen and\n Zhou, Jiayu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.356/\",\n doi = \"10.18653/v1/2022.findings-emnlp.356\",\n pages = \"4841--4852\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.356.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.356/", + "pdf_size": 353740, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5425397170794595462&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Michigan State University; Tencent AI Lab; Lynbrook High School; Michigan State University", + "aff_domain": "msu.edu;gmail.com;student.fuhsd.org;msu.edu", + "email": "msu.edu;gmail.com;student.fuhsd.org;msu.edu", + "github": "https://github.com/illidanlab/DynSelector", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Michigan State University;Tencent;Lynbrook High School", + "aff_unique_dep": ";Tencent AI Lab;", + "aff_unique_url": "https://www.msu.edu;https://ai.tencent.com;", + "aff_unique_abbr": "MSU;Tencent AI Lab;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United 
States;China" + }, + { + "id": "2022.emnlp-main.748", + "title": "ECTSum: A New Benchmark Dataset For Bullet Point Summarization of Long Earnings Call Transcripts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite tremendous progress in automatic summarization, state-of-the-art methods are predominantly trained to excel in summarizing short newswire articles, or documents with strong layout biases such as scientific articles or government reports. Efficient techniques to summarize financial documents, discussing facts and figures, have largely been unexplored, majorly due to the unavailability of suitable datasets. In this work, we present ECTSum, a new dataset with transcripts of earnings calls (ECTs), hosted by publicly traded companies, as documents, and experts-written short telegram-style bullet point summaries derived from corresponding Reuters articles. ECTs are long unstructured documents without any prescribed length limit or format. We benchmark our dataset with state-of-the-art summarization methods across various metrics evaluating the content quality and factual consistency of the generated summaries. 
Finally, we present a simple yet effective approach, ECT-BPS, to generate a set of bullet points that precisely capture the important facts discussed in the calls.", + "author": "Rajdeep Mukherjee; Abhinav Bohra; Akash Banerjee; Soumya Sharma; Manjunath Hegde; Afreen Shaikh; Shivani Shrivastava; Koustuv Dasgupta; Niloy Ganguly; Saptarshi Ghosh; Pawan Goyal", + "authorids": "/r/rajdeep-mukherjee/; /a/abhinav-bohra/; /a/akash-banerjee/; /s/soumya-sharma/; /m/manjunath-hegde/; /a/afreen-shaikh/; /s/shivani-shrivastava/; /k/koustuv-dasgupta/; /n/niloy-ganguly/; /s/saptarshi-ghosh/; /p/pawan-goyal/", + "bibtex": "@inproceedings{mukherjee-etal-2022-ectsum,\n title = \"{ECTS}um: A New Benchmark Dataset For Bullet Point Summarization of Long Earnings Call Transcripts\",\n author = \"Mukherjee, Rajdeep and\n Bohra, Abhinav and\n Banerjee, Akash and\n Sharma, Soumya and\n Hegde, Manjunath and\n Shaikh, Afreen and\n Shrivastava, Shivani and\n Dasgupta, Koustuv and\n Ganguly, Niloy and\n Ghosh, Saptarshi and\n Goyal, Pawan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.748/\",\n doi = \"10.18653/v1/2022.emnlp-main.748\",\n pages = \"10893--10906\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.748.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.748/", + "pdf_size": 1169658, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4820204875572446839&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Department of Computer Science and Engineering, IIT Kharagpur, India; Department of Computer Science and Engineering, IIT Kharagpur, India; Department of Computer Science and Engineering, IIT 
Kharagpur, India; Department of Computer Science and Engineering, IIT Kharagpur, India; Goldman Sachs Data Science and Machine Learning Group, India; Goldman Sachs Data Science and Machine Learning Group, India; Goldman Sachs Data Science and Machine Learning Group, India; Goldman Sachs Data Science and Machine Learning Group, India; Department of Computer Science and Engineering, IIT Kharagpur, India + Leibniz University of Hannover, Germany; Department of Computer Science and Engineering, IIT Kharagpur, India; Department of Computer Science and Engineering, IIT Kharagpur, India", + "aff_domain": "iitkgp.ac.in; ; ; ; ; ; ; ; ; ;", + "email": "iitkgp.ac.in; ; ; ; ; ; ; ; ; ;", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;1;1;1;1;0+2;0;0", + "aff_unique_norm": "Indian Institute of Technology Kharagpur;Goldman Sachs;Leibniz University of Hannover", + "aff_unique_dep": "Department of Computer Science and Engineering;Data Science and Machine Learning Group;", + "aff_unique_url": "https://www.iitkgp.ac.in;https://www.goldmansachs.com;https://www.leibniz.uni-hannover.de", + "aff_unique_abbr": "IIT Kharagpur;GS;LUH", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Kharagpur;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+1;0;0", + "aff_country_unique": "India;Germany" + }, + { + "id": "2022.emnlp-main.593", + "title": "EDIN: An End-to-end Benchmark and Pipeline for Unknown Entity Discovery and Indexing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing work on Entity Linking mostly assumes that the reference knowledge base is complete, and therefore all mentions can be linked. In practice this is hardly ever the case, as knowledge bases are incomplete and because novel concepts arise constantly. 
We introduce the temporally segmented Unknown Entity Discovery and Indexing (EDIN)-benchmark where unknown entities, that is entities not part of the knowledge base and without descriptions and labeled mentions, have to be integrated into an existing entity linking system. By contrasting EDIN with zero-shot entity linking, we provide insight on the additional challenges it poses. Building on dense-retrieval based entity linking, we introduce the end-to-end EDIN-pipeline that detects, clusters, and indexes mentions of unknown entities in context. Experiments show that indexing a single embedding per entity unifying the information of multiple mentions works better than indexing mentions independently.", + "author": "Nora Kassner; Fabio Petroni; Mikhail Plekhanov; Sebastian Riedel; Nicola Cancedda", + "authorids": "/n/nora-kassner/; /f/fabio-petroni/; /m/mikhail-plekhanov/; /s/sebastian-riedel/; /n/nicola-cancedda/", + "bibtex": "@inproceedings{kassner-etal-2022-edin,\n title = \"{EDIN}: An End-to-end Benchmark and Pipeline for Unknown Entity Discovery and Indexing\",\n author = \"Kassner, Nora and\n Petroni, Fabio and\n Plekhanov, Mikhail and\n Riedel, Sebastian and\n Cancedda, Nicola\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.593/\",\n doi = \"10.18653/v1/2022.emnlp-main.593\",\n pages = \"8659--8673\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.593.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.593/", + "pdf_size": 530918, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2523795583695689054&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Meta AI; Meta AI; Meta 
AI; Meta AI; Meta AI", + "aff_domain": "meta.com; ; ; ; ", + "email": "meta.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.68", + "title": "ELMER: A Non-Autoregressive Pre-trained Language Model for Efficient and Effective Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study the text generation task under the approach of pre-trained language models (PLMs). Typically, an auto-regressive (AR) method is adopted for generating texts in a token-by-token manner. Despite many advantages of AR generation, it usually suffers from inefficient inference. Therefore, non-autoregressive (NAR) models are proposed to generate all target tokens simultaneously. However, NAR models usually generate texts of lower quality due to the absence of token dependency in the output text. In this paper, we propose ELMER: an efficient and effective PLM for NAR text generation to explicitly model the token dependency during NAR generation. By leveraging the early exit technique, ELMER enables the token generations at different layers, according to their prediction confidence (a more confident token will exit at a lower layer). Besides, we propose a novel pre-training objective, Layer Permutation Language Modeling, to pre-train ELMER by permuting the exit layer for each token in sequences. 
Experiments on three text generation tasks show that ELMER significantly outperforms NAR models and further narrows the performance gap with AR PLMs (ELMER (29.92) vs BART (30.61) ROUGE-L in XSUM) while achieving over 10 times inference speedup.", + "author": "Junyi Li; Tianyi Tang; Wayne Xin Zhao; Jian-Yun Nie; Ji-Rong Wen", + "authorids": "/j/junyi-li/; /t/tianyi-tang/; /w/wayne-xin-zhao/; /j/jian-yun-nie/; /j/ji-rong-wen/", + "bibtex": "@inproceedings{li-etal-2022-elmer,\n title = \"{ELMER}: A Non-Autoregressive Pre-trained Language Model for Efficient and Effective Text Generation\",\n author = \"Li, Junyi and\n Tang, Tianyi and\n Zhao, Wayne Xin and\n Nie, Jian-Yun and\n Wen, Ji-Rong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.68/\",\n doi = \"10.18653/v1/2022.emnlp-main.68\",\n pages = \"1044--1058\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.68.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.68/", + "pdf_size": 452408, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11979983300877548977&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China+Beijing Key Laboratory of Big Data Management and Analysis Methods; Gaoling School of Artificial Intelligence, Renmin University of China+Beijing Key Laboratory of Big Data Management and Analysis Methods; Gaoling School of Artificial Intelligence, Renmin University of China+Beijing Key Laboratory of Big Data Management and Analysis Methods; DIRO, Universit\u00e9 de Montr\u00e9al; Gaoling School of Artificial Intelligence, Renmin University of China+School 
of Information, Renmin University of China+Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;gmail.com; ; ", + "email": "ruc.edu.cn;ruc.edu.cn;gmail.com; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;0+0+1", + "aff_unique_norm": "Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods;Universit\u00e9 de Montr\u00e9al", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;Big Data Management and Analysis;DIRO", + "aff_unique_url": "http://www.ruc.edu.cn;;https://www.umontreal.ca", + "aff_unique_abbr": "RUC;;UdeM", + "aff_campus_unique_index": "0;0;0;2;0", + "aff_campus_unique": "Beijing;;Montr\u00e9al", + "aff_country_unique_index": "0+0;0+0;0+0;1;0+0+0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.242", + "title": "ER-Test: Evaluating Explanation Regularization Methods for Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "By explaining how humans would solve a given task, human rationales can provide strong learning signal for neural language models (NLMs). Explanation regularization (ER) aims to improve NLM generalization by pushing the NLM\u2019s machine rationales (Which input tokens did the NLM focus on?) to align with human rationales (Which input tokens would humans focus on). Though prior works primarily study ER via in-distribution (ID) evaluation, out-of-distribution (OOD) generalization is often more critical in real-world scenarios, yet ER\u2019s effect on OOD generalization has been underexplored.In this paper, we introduce ER-Test, a framework for evaluating ER models\u2019 OOD generalization along three dimensions: unseen datasets, contrast set tests, and functional tests. 
Using ER-Test, we comprehensively analyze how ER models\u2019 OOD generalization varies with the rationale alignment criterion (loss function), human rationale type (instance-level v/s task-level), number and choice of rationale-annotated instances, and time budget for rationale annotation. Across two tasks and six datasets, we show that ER has little impact on ID performance but yields large OOD performance gains, with the best ER criterion being task-dependent. Also, ER can improve OOD performance even with task-level or few human rationales. Finally, we find that rationale annotation is more time-efficient than label annotation for improving OOD performance. Our results with ER-Test help demonstrate ER\u2019s utility and establish best practices for using ER effectively.", + "author": "Brihi Joshi; Aaron Chan; Ziyi Liu; Shaoliang Nie; Maziar Sanjabi; Hamed Firooz; Xiang Ren", + "authorids": "/b/brihi-joshi/; /a/aaron-chan/; /z/ziyi-liu/; /s/shaoliang-nie/; /m/maziar-sanjabi/; /h/hamed-firooz/; /x/xiang-ren/", + "bibtex": "@inproceedings{joshi-etal-2022-er,\n title = \"{ER}-Test: Evaluating Explanation Regularization Methods for Language Models\",\n author = \"Joshi, Brihi and\n Chan, Aaron and\n Liu, Ziyi and\n Nie, Shaoliang and\n Sanjabi, Maziar and\n Firooz, Hamed and\n Ren, Xiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.242/\",\n doi = \"10.18653/v1/2022.findings-emnlp.242\",\n pages = \"3315--3336\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.242.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.242/", + "pdf_size": 3151162, + "gs_citation": 15, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=9166019267105413161&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of Southern California\u2663; University of Southern California\u2663; University of Southern California\u2663; Meta AI\u2662; Meta AI\u2662; Meta AI\u2662; University of Southern California\u2663", + "aff_domain": "usc.edu;usc.edu;usc.edu;fb.com;fb.com;fb.com;usc.edu", + "email": "usc.edu;usc.edu;usc.edu;fb.com;fb.com;fb.com;usc.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;1;1;0", + "aff_unique_norm": "University of Southern California;Meta Platforms, Inc.", + "aff_unique_dep": ";Meta AI", + "aff_unique_url": "https://www.usc.edu;https://meta.com", + "aff_unique_abbr": "USC;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.274", + "title": "ERNIE-Layout: Layout Knowledge Enhanced Pre-training for Visually-rich Document Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent years have witnessed the rise and success of pre-training techniques in visually-rich document understanding. However, most existing methods lack the systematic mining and utilization of layout-centered knowledge, leading to sub-optimal performances. In this paper, we propose ERNIE-Layout, a novel document pre-training solution with layout knowledge enhancement in the whole workflow, to learn better representations that combine the features from text, layout, and image. Specifically, we first rearrange input sequences in the serialization stage, and then present a correlative pre-training task, reading order prediction, to learn the proper reading order of documents. 
To improve the layout awareness of the model, we integrate a spatial-aware disentangled attention into the multi-modal transformer and a replaced regions prediction task into the pre-training phase. Experimental results show that ERNIE-Layout achieves superior performance on various downstream tasks, setting new state-of-the-art on key information extraction, document image classification, and document question answering datasets. The code and models are publicly available at PaddleNLP.", + "author": "Qiming Peng; Yinxu Pan; Wenjin Wang; Bin Luo; Zhenyu Zhang; Zhengjie Huang; Yuhui Cao; Weichong Yin; Yongfeng Chen; Yin Zhang; Shikun Feng; Yu Sun; Hao Tian; Hua Wu; Haifeng Wang", + "authorids": "/q/qiming-peng/; /y/yinxu-pan/; /w/wenjin-wang/; /b/bin-luo/; /z/zhenyu-zhang/; /z/zhengjie-huang/; /y/yuhui-cao/; /w/weichong-yin/; /y/yongfeng-chen/; /y/yin-zhang/; /s/shikun-feng/; /y/yu-sun/; /h/hao-tian/; /h/hua-wu/; /h/haifeng-wang/", + "bibtex": "@inproceedings{peng-etal-2022-ernie,\n title = \"{ERNIE}-Layout: Layout Knowledge Enhanced Pre-training for Visually-rich Document Understanding\",\n author = \"Peng, Qiming and\n Pan, Yinxu and\n Wang, Wenjin and\n Luo, Bin and\n Zhang, Zhenyu and\n Huang, Zhengjie and\n Cao, Yuhui and\n Yin, Weichong and\n Chen, Yongfeng and\n Zhang, Yin and\n Feng, Shikun and\n Sun, Yu and\n Tian, Hao and\n Wu, Hua and\n Wang, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.274/\",\n doi = \"10.18653/v1/2022.findings-emnlp.274\",\n pages = \"3744--3756\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.274.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.274/", + "pdf_size": 4207827, + 
"gs_citation": 86, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2243893068355618660&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Zhejiang University, Hangzhou, China + Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Zhejiang University, Hangzhou, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China", + "aff_domain": "baidu.com;baidu.com;zju.edu.cn;baidu.com;baidu.com; ; ;baidu.com; ;zju.edu.cn;baidu.com;baidu.com; ;baidu.com;baidu.com", + "email": "baidu.com;baidu.com;zju.edu.cn;baidu.com;baidu.com; ; ;baidu.com; ;zju.edu.cn;baidu.com;baidu.com; ;baidu.com;baidu.com", + "github": "https://github.com/PaddlePaddle/PaddleNLP/tree/release/2.4/model_zoo/ernie-layout", + "project": "", + "author_num": 15, + "aff_unique_index": "0;0;1+0;0;0;0;0;0;0;1;0;0;0;0;0", + "aff_unique_norm": "Baidu Inc.;Zhejiang University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.baidu.com;http://www.zju.edu.cn", + "aff_unique_abbr": "Baidu;ZJU", + "aff_campus_unique_index": "0;0;1+0;0;0;0;0;0;0;1;0;0;0;0;0", + "aff_campus_unique": "Beijing;Hangzhou", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.519", + "title": "EUR-Lex-Sum: A Multi- and Cross-lingual Dataset for Long-form Summarization in the Legal Domain", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing summarization datasets come with two main drawbacks: (1) They tend to focus on overly exposed domains, such as news articles or wiki-like texts, and (2) are primarily monolingual, with few multilingual datasets.In this work, we propose a novel dataset, called EUR-Lex-Sum, based 
on manually curated document summaries of legal acts from the European Union law platform (EUR-Lex). Documents and their respective summaries exist as cross-lingual paragraph-aligned data in several of the 24 official European languages, enabling access to various cross-lingual and lower-resourced summarization setups. We obtain up to 1,500 document/summary pairs per language, including a subset of 375 cross-lingually aligned legal acts with texts available in *all* 24 languages. In this work, the data acquisition process is detailed and key characteristics of the resource are compared to existing summarization resources. In particular, we illustrate challenging sub-problems and open questions on the dataset that could help the facilitation of future research in the direction of domain-specific cross-lingual summarization.Limited by the extreme length and language diversity of samples, we further conduct experiments with suitable extractive monolingual and cross-lingual baselines for future work. 
Code for the extraction as well as access to our data and baselines is available online at: [https://github.com/achouhan93/eur-lex-sum](https://github.com/achouhan93/eur-lex-sum).", + "author": "Dennis Aumiller; Ashish Chouhan; Michael Gertz", + "authorids": "/d/dennis-aumiller/; /a/ashish-chouhan/; /m/michael-gertz/", + "bibtex": "@inproceedings{aumiller-etal-2022-eur,\n title = \"{EUR}-Lex-Sum: A Multi- and Cross-lingual Dataset for Long-form Summarization in the Legal Domain\",\n author = \"Aumiller, Dennis and\n Chouhan, Ashish and\n Gertz, Michael\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.519/\",\n doi = \"10.18653/v1/2022.emnlp-main.519\",\n pages = \"7626--7639\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.519.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.519/", + "pdf_size": 814433, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6522827719239658951&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Institute of Computer Science, Heidelberg University+School of Information, Media and Design, SRH Hochschule Heidelberg; Institute of Computer Science, Heidelberg University+School of Information, Media and Design, SRH Hochschule Heidelberg; Institute of Computer Science, Heidelberg University", + "aff_domain": "informatik.uni-heidelberg.de;informatik.uni-heidelberg.de;informatik.uni-heidelberg.de", + "email": "informatik.uni-heidelberg.de;informatik.uni-heidelberg.de;informatik.uni-heidelberg.de", + "github": "https://github.com/achouhan93/eur-lex-sum", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0", + "aff_unique_norm": "Heidelberg 
University;SRH Hochschule Heidelberg", + "aff_unique_dep": "Institute of Computer Science;School of Information, Media and Design", + "aff_unique_url": "https://www.uni-heidelberg.de;https://www.srh-heidelberg.de", + "aff_unique_abbr": "Uni Heidelberg;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Heidelberg;", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.479", + "title": "Early Guessing for Dialect Identification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper deals with the problem of incre-mental dialect identification. Our goal is toreliably determine the dialect before the fullutterance is given as input. The major partof the previous research on dialect identification has been model-centric, focusing on performance. We address a new question: How much input is needed to identify a dialect? Ourapproach is a data-centric analysis that resultsin general criteria for finding the shortest inputneeded to make a plausible guess. Workingwith three sets of language dialects (Swiss German, Indo-Aryan and Arabic languages), weshow that it is possible to generalize across dialects and datasets with two input shorteningcriteria: model confidence and minimal inputlength (adjusted for the input type). 
The sourcecode for experimental analysis can be found atGithub.", + "author": "Vani Kanjirangat; Tanja Samardzic; Fabio Rinaldi; Ljiljana Dolamic", + "authorids": "/v/vani-kanjirangat/; /t/tanja-samardzic/; /f/fabio-rinaldi/; /l/ljiljana-dolamic/", + "bibtex": "@inproceedings{kanjirangat-etal-2022-early,\n title = \"Early Guessing for Dialect Identification\",\n author = \"Kanjirangat, Vani and\n Samardzic, Tanja and\n Rinaldi, Fabio and\n Dolamic, Ljiljana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.479/\",\n doi = \"10.18653/v1/2022.findings-emnlp.479\",\n pages = \"6417--6426\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.479.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.479/", + "pdf_size": 218783, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1253935166249740918&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "IDSIA-USI/SUPSI, Switzerland; URPP Language and Space, UZH; IDSIA-USI/SUPSI, Switzerland; armasuisse S+T, Switzerland", + "aff_domain": "idsia.ch;uzh.ch;idsia.ch;armasuisse.ch", + "email": "idsia.ch;uzh.ch;idsia.ch;armasuisse.ch", + "github": "https://github.com/vanikanjirangat/Dialect_Early_Guessing", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2+3", + "aff_unique_norm": "IDSIA-USI/SUPSI;University of Zurich;armasuisse;T (unknown institution)", + "aff_unique_dep": ";URPP Language and Space;;", + "aff_unique_url": "https://www.idsia.ch;https://www.uzh.ch;https://www.armasuisse.ch;", + "aff_unique_abbr": ";UZH;armasuisse;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0", + 
"aff_country_unique": "Switzerland" + }, + { + "id": "2022.emnlp-main.741", + "title": "EdgeFormer: A Parameter-Efficient Transformer for On-Device Seq2seq Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce EdgeFormer \u2013 a parameter-efficient Transformer for on-device seq2seq generation under the strict computation and memory constraints. Compared with the previous parameter-efficient Transformers, EdgeFormer applies two novel principles for cost-effective parameterization, allowing it to perform better given the same parameter budget; moreover, EdgeFormer is further enhanced by layer adaptation innovation that is proposed for improving the network with shared layers.Extensive experiments show EdgeFormer can effectively outperform previous parameter-efficient Transformer baselines and achieve competitive results under both the computation and memory constraints. Given the promising results, we release EdgeLM \u2013 the pretrained version of EdgeFormer, which is the first publicly available pretrained on-device seq2seq model that can be easily fine-tuned for seq2seq tasks with strong results, facilitating on-device seq2seq generation in practice.", + "author": "Tao Ge; Si-Qing Chen; Furu Wei", + "authorids": "/t/tao-ge/; /s/si-qing-chen/; /f/furu-wei/", + "bibtex": "@inproceedings{ge-etal-2022-edgeformer,\n title = \"{E}dge{F}ormer: A Parameter-Efficient Transformer for On-Device Seq2seq Generation\",\n author = \"Ge, Tao and\n Chen, Si-Qing and\n Wei, Furu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.741/\",\n doi = \"10.18653/v1/2022.emnlp-main.741\",\n pages = \"10786--10798\"\n}", + 
"pdf": "https://aclanthology.org/2022.emnlp-main.741.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.741/", + "pdf_size": 1336086, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9421025661982055503&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Microsoft; Microsoft; Microsoft", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/unilm/tree/master/edgelm", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Microsoft Corporation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.156", + "title": "EdiT5: Semi-Autoregressive Text Editing with T5 Warm-Start", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present EdiT5 - a novel semi-autoregressive text-editing approach designed to combine the strengths of non-autoregressive text-editing and autoregressive decoding. EdiT5 is faster at inference times than conventional sequence-to-sequence (seq2seq) models, while being capable of modeling flexible input-output transformations.This is achieved by decomposing the generation process into three sub-tasks: (1) tagging to decide on the subset of input tokens to be preserved in the output, (2) re-ordering to define their order in the output text, and (3) insertion to infill the missing tokens that are not present in the input. 
The tagging and re-ordering steps, which are responsible for generating the largest portion of the output, are non-autoregressive, while the insertion uses an autoregressive decoder.Depending on the task, EdiT5 requires significantly fewer autoregressive steps demonstrating speedups of up to 25x when compared to classic seq2seq models. Quality-wise, EdiT5 is initialized with a pre-trained T5 checkpoint yielding comparable performance to T5 in high-resource settings and clearly outperforms it on low-resource settings when evaluated on three NLG tasks: Sentence Fusion, Grammatical Error Correction, and Decontextualization.", + "author": "Jonathan Mallinson; Jakub Adamek; Eric Malmi; Aliaksei Severyn", + "authorids": "/j/jonathan-mallinson/; /j/jakub-adamek/; /e/eric-malmi/; /a/aliaksei-severyn/", + "bibtex": "@inproceedings{mallinson-etal-2022-edit5,\n title = \"{E}di{T}5: Semi-Autoregressive Text Editing with T5 Warm-Start\",\n author = \"Mallinson, Jonathan and\n Adamek, Jakub and\n Malmi, Eric and\n Severyn, Aliaksei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.156/\",\n doi = \"10.18653/v1/2022.findings-emnlp.156\",\n pages = \"2126--2138\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.156.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.156/", + "pdf_size": 346465, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8682665198886865596&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 3, + "aff": "Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com", + 
"github": "", + "project": "https://edit5.page.link/code", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.492", + "title": "Eeny, meeny, miny, moe. How to choose data for morphological inflection.", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Data scarcity is a widespread problem for numerous natural language processing (NLP) tasks within low-resource languages. Within morphology, the labour-intensive task of tagging/glossing data is a serious bottleneck for both NLP and fieldwork. Active learning (AL) aims to reduce the cost of data annotation by selecting data that is most informative for the model. In this paper, we explore four sampling strategies for the task of morphological inflection using a Transformer model: a pair of oracle experiments where data is chosen based on correct/incorrect predictions by the model, model confidence, entropy, and random selection. We investigate the robustness of each sampling strategy across 30 typologically diverse languages, as well as a 10-cycle iteration using Nat\u00fcgu as a case study. Our results show a clear benefit to selecting data based on model confidence. Unsurprisingly, the oracle experiment, which is presented as a proxy for linguist/language informer feedback, shows the most improvement. This is followed closely by low-confidence and high-entropy forms. 
We also show that despite the conventional wisdom of larger data sets yielding better accuracy, introducing more instances of high-confidence, low-entropy, or forms that the model can already inflect correctly, can reduce model performance.", + "author": "Saliha Muradoglu; Mans Hulden", + "authorids": "/s/saliha-muradoglu/; /m/mans-hulden/", + "bibtex": "@inproceedings{muradoglu-hulden-2022-eeny,\n title = \"Eeny, meeny, miny, moe. How to choose data for morphological inflection.\",\n author = \"Muradoglu, Saliha and\n Hulden, Mans\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.492/\",\n doi = \"10.18653/v1/2022.emnlp-main.492\",\n pages = \"7294--7303\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.492.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.492/", + "pdf_size": 252007, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13941207284345451442&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "The Australian National University (ANU) + ARC Centre of Excellence for the Dynamics of Language (CoEDL); University of Colorado", + "aff_domain": "anu.edu.au;colorado.edu", + "email": "anu.edu.au;colorado.edu", + "github": "", + "project": "http://fieldmanuals.mpi.nl/", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "Australian National University;ARC Centre of Excellence for the Dynamics of Language;University of Colorado", + "aff_unique_dep": ";Centre of Excellence for the Dynamics of Language;", + "aff_unique_url": "https://www.anu.edu.au;https://www.dynamichlanguage.edu.au;https://www.colorado.edu", + "aff_unique_abbr": "ANU;CoEDL;CU", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1", + "aff_country_unique": "Australia;United States" + }, + { + "id": "2022.findings-emnlp.405", + "title": "Effective Pretraining Objectives for Transformer-based Autoencoders", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we study trade-offs between efficiency, cost and accuracy when pre-training Transformer encoders with different pre-training objectives. For this purpose, we analyze features of common objectives and combine them to create new effective pre-training approaches. Specifically, we designed light token generators based on a straightforward statistical approach, which can replace ELECTRA computationally heavy generators, thus highly reducing cost. Our experiments also show that (i) there are more efficient alternatives to BERT\u2019s MLM, and (ii) it is possible to efficiently pre-train Transformer-based models using lighter generators without a significant drop in performance.", + "author": "Luca Di Liello; Matteo Gabburo; Alessandro Moschitti", + "authorids": "/l/luca-di-liello/; /m/matteo-gabburo/; /a/alessandro-moschitti/", + "bibtex": "@inproceedings{di-liello-etal-2022-effective,\n title = \"Effective Pretraining Objectives for Transformer-based Autoencoders\",\n author = \"Di Liello, Luca and\n Gabburo, Matteo and\n Moschitti, Alessandro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.405/\",\n doi = \"10.18653/v1/2022.findings-emnlp.405\",\n pages = \"5533--5547\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.405.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.405/", + 
"pdf_size": 648184, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=648619500963412445&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "University of Trento; University of Trento; Amazon Alexa AI", + "aff_domain": "unitn.it;unitn.it;amazon.com", + "email": "unitn.it;unitn.it;amazon.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of Trento;Amazon", + "aff_unique_dep": ";Alexa AI", + "aff_unique_url": "https://www.unitn.it;https://www.amazon.com", + "aff_unique_abbr": "UniTN;Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Italy;United States" + }, + { + "id": "2022.emnlp-main.197", + "title": "Effective and Efficient Query-aware Snippet Extraction for Web Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Query-aware webpage snippet extraction is widely used in search engines to help users better understand the content of the returned webpages before clicking. The extracted snippet is expected to summarize the webpage in the context of the input query. Existing snippet extraction methods mainly rely on handcrafted features of overlapping words, which cannot capture deep semantic relationships between the query and webpages. Another idea is to extract the sentences which are most relevant to queries as snippets with existing text matching methods. However, these methods ignore the contextual information of webpages, which may be sub-optimal. In this paper, we propose an effective query-aware webpage snippet extraction method named DeepQSE. In DeepQSE, the concatenation of title, query and each candidate sentence serves as an input of query-aware sentence encoder, aiming to capture the fine-grained relevance between the query and sentences. 
Then, these query-aware sentence representations are modeled jointly through a document-aware relevance encoder to capture contextual information of the webpage. Since the query and each sentence are jointly modeled in DeepQSE, its online inference may be slow. Thus, we further propose an efficient version of DeepQSE, named Efficient-DeepQSE, which can significantly improve the inference speed of DeepQSE without affecting its performance. The core idea of Efficient-DeepQSE is to decompose the query-aware snippet extraction task into two stages, i.e., a coarse-grained candidate sentence selection stage where sentence representations can be cached, and a fine-grained relevance modeling stage. Experiments on two datasets validate the effectiveness and efficiency of our methods.", + "author": "Jingwei Yi; Fangzhao Wu; Chuhan Wu; Xiaolong Huang; Binxing Jiao; Guangzhong Sun; Xing Xie", + "authorids": "/j/jingwei-yi/; /f/fangzhao-wu/; /c/chuhan-wu/; /x/xiaolong-huang/; /b/binxing-jiao/; /g/guangzhong-sun/; /x/xing-xie/", + "bibtex": "@inproceedings{yi-etal-2022-effective,\n title = \"Effective and Efficient Query-aware Snippet Extraction for Web Search\",\n author = \"Yi, Jingwei and\n Wu, Fangzhao and\n Wu, Chuhan and\n Huang, Xiaolong and\n Jiao, Binxing and\n Sun, Guangzhong and\n Xie, Xing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.197/\",\n doi = \"10.18653/v1/2022.emnlp-main.197\",\n pages = \"3035--3046\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.197.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.197/", + "pdf_size": 697298, + "gs_citation": 3, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=8343261648600000592&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Science and Technology of China; Microsoft Research Asia; Tsinghua University; Microsoft STC Asia; Microsoft STC Asia; University of Science and Technology of China; Microsoft Research Asia", + "aff_domain": "mail.ustc.edu.cn;gmail.com;gmail.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", + "email": "mail.ustc.edu.cn;gmail.com;gmail.com;microsoft.com;microsoft.com;ustc.edu.cn;microsoft.com", + "github": "https://github.com/yjw1029/DeepQSE", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;3;0;1", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research;Tsinghua University;Microsoft", + "aff_unique_dep": ";Research;;STC", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.tsinghua.edu.cn;https://www.microsoft.com", + "aff_unique_abbr": "USTC;MSR Asia;THU;MS", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.518", + "title": "Efficient (Soft) Q-Learning for Text Generation with Limited Good Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Maximum likelihood estimation (MLE) is the predominant algorithm for training text generation models. This paradigm relies on direct supervision examples, which is not applicable to many emerging applications, such as generating adversarial attacks or generating prompts to control language models. Reinforcement learning (RL) on the other hand offers a more flexible solution by allowing users to plug in arbitrary task metrics as reward. 
Yet previous RL algorithms for text generation, such as policy gradient (on-policy RL) and Q-learning (off-policy RL), are often notoriously inefficient or unstable to train due to the large sequence space and the sparse reward received only at the end of sequences. In this paper, we introduce a new RL formulation for text generation from the soft Q-learning (SQL) perspective. It enables us to draw from the latest RL advances, such as path consistency learning, to combine the best of on-/off-policy updates, and learn effectively from sparse reward. We apply the approach to a wide range of novel text generation tasks, including learning from noisy/negative examples, adversarial attacks, and prompt generation. Experiments show our approach consistently outperforms both task-specialized algorithms and the previous RL methods.", + "author": "Han Guo; Bowen Tan; Zhengzhong Liu; Eric Xing; Zhiting Hu", + "authorids": "/h/han-guo/; /b/bowen-tan/; /z/zhengzhong-liu/; /e/eric-xing/; /z/zhiting-hu/", + "bibtex": "@inproceedings{guo-etal-2022-efficient,\n title = \"Efficient (Soft) {Q}-Learning for Text Generation with Limited Good Data\",\n author = \"Guo, Han and\n Tan, Bowen and\n Liu, Zhengzhong and\n Xing, Eric and\n Hu, Zhiting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.518/\",\n doi = \"10.18653/v1/2022.findings-emnlp.518\",\n pages = \"6969--6991\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.518.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.518/", + "pdf_size": 3434915, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3406332181383871970&as_sdt=5,44&sciodt=0,44&hl=en", + 
"gs_version_total": 4, + "aff": "Carnegie Mellon University+Petuum Inc.; Carnegie Mellon University+Petuum Inc.+Mohamed bin Zayed University of Artificial Intelligence; Carnegie Mellon University+Petuum Inc.+Mohamed bin Zayed University of Artificial Intelligence; Carnegie Mellon University+Petuum Inc.+Mohamed bin Zayed University of Artificial Intelligence; UC San Diego", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;gmail.com;cs.cmu.edu;ucsd.edu", + "email": "cs.cmu.edu;cs.cmu.edu;gmail.com;cs.cmu.edu;ucsd.edu", + "github": "https://github.com/HanGuo97/soft-Q-learning-for-text-generation", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1+2;0+1+2;0+1+2;3", + "aff_unique_norm": "Carnegie Mellon University;Petuum Inc.;Mohamed bin Zayed University of Artificial Intelligence;University of California, San Diego", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.cmu.edu;https://www.petuum.com;https://www.mbzuai.ac.ae;https://www.ucsd.edu", + "aff_unique_abbr": "CMU;;MBZUAI;UCSD", + "aff_campus_unique_index": ";;;;1", + "aff_campus_unique": ";San Diego", + "aff_country_unique_index": "0+0;0+0+1;0+0+1;0+0+1;0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.569", + "title": "Efficient Adversarial Training with Robust Early-Bird Tickets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Adversarial training is one of the most powerful methods to improve the robustness of pre-trained language models (PLMs). However, this approach is typically more expensive than traditional fine-tuning because of the necessity to generate adversarial examples via gradient descent. Delving into the optimization process of adversarial training, we find that robust connectivity patterns emerge in the early training phase (typically 0.15~0.3 epochs), far before parameters converge. 
Inspired by this finding, we dig out robust early-bird tickets (i.e., subnetworks) to develop an efficient adversarial training method: (1) searching for robust tickets with structured sparsity in the early stage; (2) fine-tuning robust tickets in the remaining time. To extract the robust tickets as early as possible, we design a ticket convergence metric to automatically terminate the searching process. Experiments show that the proposed efficient adversarial training method can achieve up to 7\u00d7 \u223c 13 \u00d7 training speedups while maintaining comparable or even better robustness compared to the most competitive state-of-the-art adversarial training methods.", + "author": "Zhiheng Xi; Rui Zheng; Tao Gui; Qi Zhang; Xuanjing Huang", + "authorids": "/z/zhiheng-xi/; /r/rui-zheng/; /t/tao-gui/; /q/qi-zhang/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{xi-etal-2022-efficient,\n title = \"Efficient Adversarial Training with Robust Early-Bird Tickets\",\n author = \"Xi, Zhiheng and\n Zheng, Rui and\n Gui, Tao and\n Zhang, Qi and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.569/\",\n doi = \"10.18653/v1/2022.emnlp-main.569\",\n pages = \"8318--8331\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.569.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.569/", + "pdf_size": 640985, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17163497572443527525&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; Institute of Modern 
Languages and Linguistics, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China + International Human Phenome Institutes (Shanghai), Shanghai, China", + "aff_domain": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "Fudan University;International Human Phenome Institutes", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.fudan.edu.cn;", + "aff_unique_abbr": "Fudan;", + "aff_campus_unique_index": "0;0;0;0;0+0", + "aff_campus_unique": "Shanghai", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.54", + "title": "Efficient Document Retrieval by End-to-End Refining and Quantizing BERT Embedding with Contrastive Product Quantization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Efficient document retrieval heavily relies on the technique of semantic hashing, which learns a binary code for every document and employs Hamming distance to evaluate document distances. However, existing semantic hashing methods are mostly established on outdated TFIDF features, which obviously do not contain lots of important semantic information about documents. Furthermore, the Hamming distance can only be equal to one of several integer values, significantly limiting its representational ability for document distances. To address these issues, in this paper, we propose to leverage BERT embeddings to perform efficient retrieval based on the product quantization technique, which will assign for every document a real-valued codeword from the codebook, instead of a binary code as in semantic hashing. 
Specifically, we first transform the original BERT embeddings via a learnable mapping and feed the transformed embedding into a probabilistic product quantization module to output the assigned codeword. The refining and quantizing modules can be optimized in an end-to-end manner by minimizing the probabilistic contrastive loss. A mutual information maximization based method is further proposed to improve the representativeness of codewords, so that documents can be quantized more accurately. Extensive experiments conducted on three benchmarks demonstrate that our proposed method significantly outperforms current state-of-the-art baselines.", + "author": "Zexuan Qiu; Qinliang Su; Jianxing Yu; Shijing Si", + "authorids": "/z/zexuan-qiu/; /q/qinliang-su/; /j/jianxing-yu/; /s/shijing-si/", + "bibtex": "@inproceedings{qiu-etal-2022-efficient,\n title = \"Efficient Document Retrieval by End-to-End Refining and Quantizing {BERT} Embedding with Contrastive Product Quantization\",\n author = \"Qiu, Zexuan and\n Su, Qinliang and\n Yu, Jianxing and\n Si, Shijing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.54/\",\n doi = \"10.18653/v1/2022.emnlp-main.54\",\n pages = \"853--863\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.54.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.54/", + "pdf_size": 333875, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18288844341691227917&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Key Laboratory of Big Data Analysis and Processing, Guangzhou, 
China; School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China+Guangdong Key Laboratory of Big Data Analysis and Processing, Guangzhou, China; School of Artificial Intelligence, Sun Yat-sen University, Guangzhou, China; School of Economics and Finance, Shanghai International Studies University, China", + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn; ", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn;mail.sysu.edu.cn; ", + "github": "https://github.com/qiuzx2/MICPQ", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;2", + "aff_unique_norm": "Sun Yat-sen University;Guangdong Key Laboratory of Big Data Analysis and Processing;Shanghai International Studies University", + "aff_unique_dep": "School of Computer Science and Engineering;;School of Economics and Finance", + "aff_unique_url": "http://www.sysu.edu.cn;;http://www.sisu.edu.cn", + "aff_unique_abbr": "SYSU;;", + "aff_campus_unique_index": "0+0;0+0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.43", + "title": "Efficient Federated Learning on Knowledge Graphs via Privacy-preserving Relation Embedding Aggregation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Federated learning (FL) can be essential in knowledge representation, reasoning, and data mining applications over multi-source knowledge graphs (KGs). A recent study FedE first proposes an FL framework that shares entity embeddings of KGs across all clients. However, entity embedding sharing from FedE would incur a severe privacy leakage. Specifically, the known entity embedding can be used to infer whether a specific relation between two entities exists in a private client. 
In this paper, we introduce a novel attack method that aims to recover the original data based on the embedding information, which is further used to evaluate the vulnerabilities of FedE. Furthermore, we propose a Federated learning paradigm with privacy-preserving Relation embedding aggregation (FedR) to tackle the privacy issue in FedE. Besides, relation embedding sharing can significantly reduce the communication cost due to its smaller size of queries. We conduct extensive experiments to evaluate FedR with five different KG embedding models and three datasets. Compared to FedE, FedR achieves similar utility and significant improvements regarding privacy-preserving effect and communication efficiency on the link prediction task.", + "author": "Kai Zhang; Yu Wang; Hongyi Wang; Lifu Huang; Carl Yang; Xun Chen; Lichao Sun", + "authorids": "/k/kai-zhang/; /y/yu-wang/; /h/hongyi-wang/; /l/lifu-huang/; /c/carl-yang/; /x/xun-chen/; /l/lichao-sun/", + "bibtex": "@inproceedings{zhang-etal-2022-efficient-federated,\n title = \"Efficient Federated Learning on Knowledge Graphs via Privacy-preserving Relation Embedding Aggregation\",\n author = \"Zhang, Kai and\n Wang, Yu and\n Wang, Hongyi and\n Huang, Lifu and\n Yang, Carl and\n Chen, Xun and\n Sun, Lichao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.43/\",\n doi = \"10.18653/v1/2022.findings-emnlp.43\",\n pages = \"613--621\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.43.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.43/", + "pdf_size": 486368, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3871314520486972167&as_sdt=2005&sciodt=0,5&hl=en", 
+ "gs_version_total": 7, + "aff": "Lehigh University; University of Illinois Chicago; Carnegie Mellon University; Virginia Tech; Emory University; Samsung Research America; Lehigh University", + "aff_domain": "lehigh.edu;uic.edu;andrew.cmu.edu;vt.edu;emory.edu;samsung.com;lehigh.edu", + "email": "lehigh.edu;uic.edu;andrew.cmu.edu;vt.edu;emory.edu;samsung.com;lehigh.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;4;5;0", + "aff_unique_norm": "Lehigh University;University of Illinois at Chicago;Carnegie Mellon University;Virginia Tech;Emory University;Samsung Research America", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.lehigh.edu;https://www.uic.edu;https://www.cmu.edu;https://www.vt.edu;https://www.emory.edu;https://www.samsung.com/us/careers/research/", + "aff_unique_abbr": "Lehigh;UIC;CMU;VT;Emory;SRA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "", + "title": "Efficient Large Scale Language Modeling with Mixtures of Experts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Mixture of Experts layers (MoEs) enable efficient scaling of language models through conditional computation. This paper presents a detailed empirical study of how autoregressive MoE language models scale in comparison with dense models in a wide range of settings: in- and out-of-domain language modeling, zero- and few-shot priming, and full-shot fine-tuning. With the exception of fine-tuning, we find MoEs to be substantially more compute efficient. At more modest training budgets, MoEs can match the performance of dense models using ~4 times less compute. This gap narrows at scale, but our largest MoE model (1.1T parameters) consistently outperforms a compute-equivalent dense model (6.7B parameters). 
Overall, this performance gap varies greatly across tasks and domains, suggesting that MoE and dense models generalize differently in ways that are worthy of future study. We make our code and models publicly available for research use.", + "author": "Mikel Artetxe; Shruti Bhosale; Naman Goyal; Todor Mihaylov; Myle Ott; Sam Shleifer; Xi Victoria Lin; Jingfei Du; Srinivasan Iyer; Ramakanth Pasunuru; Giridharan Anantharaman; Xian Li; Shuohui Chen; Halil Akin; Mandeep Baines; Louis Martin; Xing Zhou; Punit Singh Koura; Brian O\u2019Horo; Jeffrey Wang; Luke Zettlemoyer; Mona Diab; Zornitsa Kozareva; Veselin Stoyanov", + "authorids": "/m/mikel-artetxe/; /s/shruti-bhosale/; /n/naman-goyal/; /t/todor-mihaylov/; /m/myle-ott/; /s/sam-shleifer/; /x/xi-victoria-lin/; /j/jingfei-du/; /s/srinivasan-iyer/; /r/ramakanth-pasunuru/; /g/giridharan-anantharaman/; /x/xian-li/; /s/shuohui-chen/; /h/halil-akin/; /m/mandeep-baines/; /l/louis-martin/; /x/xing-zhou/; /p/punit-singh-koura/; /b/brian-ohoro/; /j/jeffrey-wang/; /l/luke-zettlemoyer/; /m/mona-diab/; /z/zornitsa-kozareva/; /v/veselin-stoyanov/", + "bibtex": "https://aclanthology.org/2022.emnlp-main.804.bib", + "pdf": "", + "site": "https://aclanthology.org/2022.emnlp-main.804/", + "gs_citation": 146, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11631950114205381462&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;;;;;;;;;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;;;;;;;", + "github": "", + "project": "", + "author_num": 24 + }, + { + "id": "2022.emnlp-main.312", + "title": "Efficient Nearest Neighbor Emotion Classification with BERT-whitening", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Retrieval-based methods have been proven effective in many NLP tasks. Previous methods use representations from the pre-trained model for similarity search directly. 
However, the sentence representations from the pre-trained model like BERT perform poorly in retrieving semantically similar sentences, resulting in poor performance of the retrieval-based methods. In this paper, we propose kNN-EC, a simple and efficient non-parametric emotion classification (EC) method using nearest neighbor retrieval. We use BERT-whitening to get better sentence semantics, ensuring that nearest neighbor retrieval works. Meanwhile, BERT-whitening can also reduce memory storage of datastore and accelerate retrieval speed, solving the efficiency problem of the previous methods. kNN-EC average improves the pre-trained model by 1.17 F1-macro on two emotion classification datasets.", + "author": "Wenbiao Yin; Lin Shang", + "authorids": "/w/wenbiao-yin/; /l/lin-shang/", + "bibtex": "@inproceedings{yin-shang-2022-efficient,\n title = \"Efficient Nearest Neighbor Emotion Classification with {BERT}-whitening\",\n author = \"Yin, Wenbiao and\n Shang, Lin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.312/\",\n doi = \"10.18653/v1/2022.emnlp-main.312\",\n pages = \"4738--4745\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.312.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.312/", + "pdf_size": 8847663, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10669236955098082009&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "State Key Laboratory for Novel Software Technology, Department of Computer Science and Technology, Nanjing University, Nanjing, China; State Key Laboratory for Novel Software Technology, Department of Computer Science and Technology, Nanjing 
University, Nanjing, China", + "aff_domain": "smail.nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "Department of Computer Science and Technology", + "aff_unique_url": "http://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Nanjing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.140", + "title": "Efficient Nearest Neighbor Search for Cross-Encoder Models using Matrix Factorization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Efficient k-nearest neighbor search is a fundamental task, foundational for many problems in NLP. When the similarity is measured by dot-product between dual-encoder vectors or L2-distance, there already exist many scalable and efficient search methods. But not so when similarity is measured by more accurate and expensive black-box neural similarity models, such as cross-encoders, which jointly encode the query and candidate neighbor. The cross-encoders\u2019 high computational cost typically limits their use to reranking candidates retrieved by a cheaper model, such as dual encoder or TF-IDF. However, the accuracy of such a two-stage approach is upper-bounded by the recall of the initial candidate set, and potentially requires additional training to align the auxiliary retrieval model with the cross-encoder model. In this paper, we present an approach that avoids the use of a dual-encoder for retrieval, relying solely on the cross-encoder. Retrieval is made efficient with CUR decomposition, a matrix decomposition approach that approximates all pairwise cross-encoder distances from a small subset of rows and columns of the distance matrix. 
Indexing items using our approach is computationally cheaper than training an auxiliary dual-encoder model through distillation. Empirically, for k > 10, our approach provides test-time recall-vs-computational cost trade-offs superior to the current widely-used methods that re-rank items retrieved using a dual-encoder or TF-IDF.", + "author": "Nishant Yadav; Nicholas Monath; Rico Angell; Manzil Zaheer; Andrew McCallum", + "authorids": "/n/nishant-yadav/; /n/nicholas-monath/; /r/rico-angell/; /m/manzil-zaheer/; /a/andrew-mccallum/", + "bibtex": "@inproceedings{yadav-etal-2022-efficient,\n title = \"Efficient Nearest Neighbor Search for Cross-Encoder Models using Matrix Factorization\",\n author = \"Yadav, Nishant and\n Monath, Nicholas and\n Angell, Rico and\n Zaheer, Manzil and\n McCallum, Andrew\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.140/\",\n doi = \"10.18653/v1/2022.emnlp-main.140\",\n pages = \"2171--2194\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.140.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.140/", + "pdf_size": 1916564, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16992013306093954887&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Massachusetts Amherst; Google Research; University of Massachusetts Amherst; Google Research; University of Massachusetts Amherst", + "aff_domain": "cs.umass.edu;google.com;cs.umass.edu;google.com;cs.umass.edu", + "email": "cs.umass.edu;google.com;cs.umass.edu;google.com;cs.umass.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;0", + "aff_unique_norm": 
"University of Massachusetts Amherst;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.umass.edu;https://research.google", + "aff_unique_abbr": "UMass Amherst;Google Research", + "aff_campus_unique_index": "0;1;0;1;0", + "aff_campus_unique": "Amherst;Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.502", + "title": "Efficient Pre-training of Masked Language Model via Concept-based Curriculum Masking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Self-supervised pre-training has achieved remarkable success in extensive natural language processing tasks. Masked language modeling (MLM) has been widely used for pre-training effective bidirectional representations but comes at a substantial training cost. In this paper, we propose a novel concept-based curriculum masking (CCM) method to efficiently pre-train a language model. CCM has two key differences from existing curriculum learning approaches to effectively reflect the nature of MLM. First, we introduce a novel curriculum that evaluates the MLM difficulty of each token based on a carefully-designed linguistic difficulty criterion. Second, we construct a curriculum that masks easy words and phrases first and gradually masks related ones to the previously masked ones based on a knowledge graph. Experimental results show that CCM significantly improves pre-training efficiency. 
Specifically, the model trained with CCM shows comparative performance with the original BERT on the General Language Understanding Evaluation benchmark at half of the training cost.", + "author": "Mingyu Lee; Jun-Hyung Park; Junho Kim; Kang-Min Kim; SangKeun Lee", + "authorids": "/m/mingyu-lee/; /j/jun-hyung-park/; /j/junho-kim/; /k/kang-min-kim/; /s/sangkeun-lee/", + "bibtex": "@inproceedings{lee-etal-2022-efficient-pre,\n title = \"Efficient Pre-training of Masked Language Model via Concept-based Curriculum Masking\",\n author = \"Lee, Mingyu and\n Park, Jun-Hyung and\n Kim, Junho and\n Kim, Kang-Min and\n Lee, SangKeun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.502/\",\n doi = \"10.18653/v1/2022.emnlp-main.502\",\n pages = \"7417--7427\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.502.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.502/", + "pdf_size": 1071462, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7300447767080851894&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Computer Science and Engineering, Korea University, Seoul, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea; Department of Data Science, The Catholic University of Korea, Bucheon, Republic of Korea; Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea+Department of Computer Science and Engineering, Korea University, Seoul, Republic of Korea", + "aff_domain": 
"korea.ac.kr;korea.ac.kr;korea.ac.kr;catholic.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;catholic.ac.kr;korea.ac.kr", + "github": "https://github.com/KoreaMGLEE/Concept-based-curriculum-masking", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0+0", + "aff_unique_norm": "Korea University;The Catholic University of Korea", + "aff_unique_dep": "Department of Artificial Intelligence;Department of Data Science", + "aff_unique_url": "http://www.korea.ac.kr;https://www.catholic.ac.kr", + "aff_unique_abbr": "KU;CUK", + "aff_campus_unique_index": "0;0;0;1;0+0", + "aff_campus_unique": "Seoul;Bucheon", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "Republic of Korea" + }, + { + "id": "2022.findings-emnlp.531", + "title": "Efficient Zero-shot Event Extraction with Context-Definition Alignment", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Event extraction (EE) is the task of identifying interested event mentions from text.Conventional efforts mainly focus on the supervised setting. However, these supervised models cannot generalize to event types out of the pre-defined ontology. To fill this gap, many efforts have been devoted to the zero-shot EE problem. This paper follows the trend of modeling event-type semantics but moves one step further. We argue that using the static embedding of the event type name might not be enough because a single word could be ambiguous, and we need a sentence to define the type semantics accurately. To model the definition semantics, we use two separate transformer models to project the contextualized event mentions and corresponding definitions into the same embedding space and then minimize their embedding distance via contrastive learning. On top of that, we also propose a warming phase to help the model learn the minor difference between similar definitions. We name our approach Zero-shot Event extraction with Definition (ZED). 
Experiments on the MAVEN dataset show that our model significantly outperforms all previous zero-shot EE methods with fast inference speed due to the disjoint design. Further experiments also show that can be easily applied to the few-shot setting when the annotation is available and consistently outperforms baseline supervised methods.", + "author": "Hongming Zhang; Wenlin Yao; Dong Yu", + "authorids": "/h/hongming-zhang/; /w/wenlin-yao/; /d/dong-yu/", + "bibtex": "@inproceedings{zhang-etal-2022-efficient-zero,\n title = \"Efficient Zero-shot Event Extraction with Context-Definition Alignment\",\n author = \"Zhang, Hongming and\n Yao, Wenlin and\n Yu, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.531/\",\n doi = \"10.18653/v1/2022.findings-emnlp.531\",\n pages = \"7169--7179\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.531.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.531/", + "pdf_size": 817897, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10368482192597125192&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Tencent AI Lab, Bellevue, USA; Tencent AI Lab, Bellevue, USA; Tencent AI Lab, Bellevue, USA", + "aff_domain": "global.tencent.com;global.tencent.com;global.tencent.com", + "email": "global.tencent.com;global.tencent.com;global.tencent.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "AI Lab", + "aff_unique_url": "https://ai.tencent.com", + "aff_unique_abbr": "Tencent AI Lab", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Bellevue", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.334", + "title": "Efficiently Tuned Parameters Are Task Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Intermediate-task transfer can benefit a wide range of NLP tasks with properly selected source datasets. However, it is computationally infeasible to experiment with all intermediate transfer combinations, making choosing a useful source task a challenging problem. In this paper, we anticipate that task-specific parameters updated in parameter-efficient tuning methods are likely to encode task-specific information. Therefore, such parameters can be predictive for inter-task transferability. Thus, we propose to exploit these efficiently tuned parameters as off-the-shelf task embeddings for the efficient selection of source datasets for intermediate-task transfer. We experiment with 11 text classification tasks and 11 question answering tasks. Experimental results show that our approach consistently outperforms existing inter-task transferability prediction methods while being conceptually simple and computationally efficient. Our analysis also reveals that the ability of efficiently tuned parameters on transferability prediction is disentangled with their in-task performance. 
This allows us to use parameters from early checkpoints as task embeddings to further improve efficiency.", + "author": "Wangchunshu Zhou; Canwen Xu; Julian McAuley", + "authorids": "/w/wangchunshu-zhou/; /c/canwen-xu/; /j/julian-mcauley/", + "bibtex": "@inproceedings{zhou-etal-2022-efficiently,\n title = \"Efficiently Tuned Parameters Are Task Embeddings\",\n author = \"Zhou, Wangchunshu and\n Xu, Canwen and\n McAuley, Julian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.334/\",\n doi = \"10.18653/v1/2022.emnlp-main.334\",\n pages = \"5007--5014\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.334.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.334/", + "pdf_size": 226778, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11417148946959425601&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "ETH Zurich; University of California, San Diego; University of California, San Diego", + "aff_domain": "inf.ethz.ch;ucsd.edu;ucsd.edu", + "email": "inf.ethz.ch;ucsd.edu;ucsd.edu", + "github": "https://github.com/JetRunner/TuPaTE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "ETH Zurich;University of California, San Diego", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ethz.ch;https://www.ucsd.edu", + "aff_unique_abbr": "ETHZ;UCSD", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";San Diego", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.emnlp-main.721", + "title": "Eliciting Knowledge from Large Pre-Trained Models for Unsupervised Knowledge-Grounded 
Conversation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent advances in large-scale pre-training provide large models with the potential to learn knowledge from the raw text. It is thus natural to ask whether it is possible to leverage these large models as knowledge bases for downstream tasks. In this work, we answer the aforementioned question in unsupervised knowledge-grounded conversation. We explore various methods that best elicit knowledge from large models. Our human study indicates that, though hallucinations exist, large models post the unique advantage of being able to output common sense and summarize facts that cannot be directly retrieved from the search engine. To better exploit such generated knowledge in dialogue generation, we treat the generated knowledge as a noisy knowledge source and propose the posterior-based reweighing as well as the noisy training strategy. Empirical results on two benchmarks show advantages over the state-of-the-art methods.", + "author": "Yanyang Li; Jianqiao Zhao; Michael Lyu; Liwei Wang", + "authorids": "/y/yanyang-li/; /j/jianqiao-zhao/; /m/michael-lyu/; /l/liwei-wang/", + "bibtex": "@inproceedings{li-etal-2022-eliciting,\n title = \"Eliciting Knowledge from Large Pre-Trained Models for Unsupervised Knowledge-Grounded Conversation\",\n author = \"Li, Yanyang and\n Zhao, Jianqiao and\n Lyu, Michael and\n Wang, Liwei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.721/\",\n doi = \"10.18653/v1/2022.emnlp-main.721\",\n pages = \"10551--10564\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.721.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.721/", + "pdf_size": 375224, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2435271218138198056&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong", + "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The Chinese University of Hong Kong", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.cuhk.edu.hk", + "aff_unique_abbr": "CUHK", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Hong Kong", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.189", + "title": "Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent works suggest that transformer models are capable of multi-tasking on diverse NLP tasks and adapt to new tasks efficiently. However, the potential of these multi-task models may be limited as they use the same set of parameters for all tasks. In contrast, humans tackle tasks in a more flexible way, by making proper presumptions on what skills and knowledge are relevant and executing only the necessary computations. 
Inspired by this, we propose to use task-level mixture-of-expert models, which has a collection of transformer layers (i.e., experts) and a router component to choose among these experts dynamically and flexibly. We find that these models help improve the average performance gain (ARG) metric by 2.6% when adapting to unseen tasks in few-shot settings, and by 5.6% in zero-shot generalization settings. Further, we show that the learned routing decisions and experts partly rediscover human categorization of NLP tasks \u2013 certain experts are strongly associated with extractive tasks, some with classification tasks, and some with tasks requiring world knowledge.", + "author": "Qinyuan Ye; Juan Zha; Xiang Ren", + "authorids": "/q/qinyuan-ye/; /j/juan-zha/; /x/xiang-ren/", + "bibtex": "@inproceedings{ye-etal-2022-eliciting,\n title = \"Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts\",\n author = \"Ye, Qinyuan and\n Zha, Juan and\n Ren, Xiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.189/\",\n doi = \"10.18653/v1/2022.findings-emnlp.189\",\n pages = \"2567--2592\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.189.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.189/", + "pdf_size": 720854, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2433542995670723253&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California; University of Southern California; University of Southern California", + "aff_domain": "usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu", + "github": "https://github.com/INK-USC/CrossTaskMoE", 
+ "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.340", + "title": "Empathetic Dialogue Generation via Sensitive Emotion Recognition and Sensible Knowledge Selection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Empathy, which is widely used in psychological counseling, is a key trait of everyday human conversations. Equipped with commonsense knowledge, current approaches to empathetic response generation focus on capturing implicit emotion within dialogue context, where the emotions are treated as a static variable throughout the conversations. However, emotions change dynamically between utterances, which makes previous works difficult to perceive the emotion flow and predict the correct emotion of the target response, leading to inappropriate response. Furthermore, simply importing commonsense knowledge without harmonization may trigger the conflicts between knowledge and emotion, which confuse the model to choose the correct information to guide the generation process. To address the above problems, we propose a Serial Encoding and Emotion-Knowledge interaction (SEEK) method for empathetic dialogue generation. We use a fine-grained encoding strategy which is more sensitive to the emotion dynamics (emotion flow) in the conversations to predict the emotion-intent characteristic of response. Besides, we design a novel framework to model the interaction between knowledge and emotion to solve the conflicts generate more sensible response. 
Extensive experiments on the utterance-level annotated EMPATHETICDIALOGUES demonstrate that SEEK outperforms the strong baseline in both automatic and manual evaluations.", + "author": "Lanrui Wang; Jiangnan Li; Zheng Lin; Fandong Meng; Chenxu Yang; Weiping Wang; Jie Zhou", + "authorids": "/l/lanrui-wang/; /j/jiangnan-li/; /z/zheng-lin/; /f/fandong-meng/; /c/chenxu-yang/; /w/weiping-wang/; /j/jie-zhou/", + "bibtex": "@inproceedings{wang-etal-2022-empathetic,\n title = \"Empathetic Dialogue Generation via Sensitive Emotion Recognition and Sensible Knowledge Selection\",\n author = \"Wang, Lanrui and\n Li, Jiangnan and\n Lin, Zheng and\n Meng, Fandong and\n Yang, Chenxu and\n Wang, Weiping and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.340/\",\n doi = \"10.18653/v1/2022.findings-emnlp.340\",\n pages = \"4634--4645\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.340.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.340/", + "pdf_size": 550447, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8122332727774729323&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Pattern 
Recognition Center, WeChat AI, Tencent Inc, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China", + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn;tencent.com;iie.ac.cn;iie.ac.cn;tencent.com", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn;tencent.com;iie.ac.cn;iie.ac.cn;tencent.com", + "github": "https://github.com/wlr737/EMNLP2022-SEEK", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;2;0+1;0+1;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Tencent Inc", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.tencent.com", + "aff_unique_abbr": "CAS;UCAS;Tencent", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.475", + "title": "Empathetic and Emotionally Positive Conversation Systems with an Emotion-specific Query-Response Memory", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Emotional conversation systems generate responses for the input queries considering the speaker\u2019s emotions in a conversation. Existing emotional conversation systems output emotional responses according to either a given emotion or the user\u2019s emotion reflected in the input queries. 
Following a given emotion may lead to an emotional drift between the given emotion and the conversation state, and following only the user\u2019s emotion may aggravate the user\u2019s negative feelings if users suffer from a negative mood. In this paper, we propose to generate empathetic responses catering to the user\u2019s emotions while leading the conversation to be emotionally positive. Particularly, by abstracting the conversation corpus, we extract and store the different responding strategies for different users\u2019 emotions and conversational topics into a memory. We encourage positive emotions in conversation via a sentiment evaluator. We model the memory outputs with a Gaussian mixture distribution and sample a final responding strategy from the distribution. The strategy acts as a condition to a transformer model to generate responses. The experiments verify our model surpasses the baseline methods in appropriateness, diversity, and generating emotionally positive responses.", + "author": "Zhiliang Tian; Yinliang Wang; Yiping Song; Chi Zhang; Dongkyu Lee; Yingxiu Zhao; Dongsheng Li; Nevin L. 
Zhang", + "authorids": "/z/zhiliang-tian/; /y/yinliang-wang/; /y/yiping-song/; /c/chi-zhang/; /d/dongkyu-lee/; /y/yingxiu-zhao/; /d/dongsheng-li/; /n/nevin-l-zhang/", + "bibtex": "@inproceedings{tian-etal-2022-empathetic,\n title = \"Empathetic and Emotionally Positive Conversation Systems with an Emotion-specific Query-Response Memory\",\n author = \"Tian, Zhiliang and\n Wang, Yinliang and\n Song, Yiping and\n Zhang, Chi and\n Lee, Dongkyu and\n Zhao, Yingxiu and\n Li, Dongsheng and\n Zhang, Nevin L.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.475/\",\n doi = \"10.18653/v1/2022.findings-emnlp.475\",\n pages = \"6364--6376\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.475.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.475/", + "pdf_size": 579075, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16758373218179926515&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "College of Computer, National University of Defense Technology; Department of CSE, The Hong Kong University of Science and Technology; College of Science, National University of Defense Technology + Department of CSE, The Hong Kong University of Science and Technology; Department of CSE, The Hong Kong University of Science and Technology; Department of CSE, The Hong Kong University of Science and Technology; Department of CSE, The Hong Kong University of Science and Technology; College of Computer, National University of Defense Technology; Department of CSE, The Hong Kong University of Science and Technology", + "aff_domain": 
"gmail.com;gmail.com;nudt.edu.cn;cse.ust.hk;cse.ust.hk;cse.ust.hk;163.com;cse.ust.hk", + "email": "gmail.com;gmail.com;nudt.edu.cn;cse.ust.hk;cse.ust.hk;cse.ust.hk;163.com;cse.ust.hk", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0+1;1;1;1;0;1", + "aff_unique_norm": "National University of Defense Technology;The Hong Kong University of Science and Technology", + "aff_unique_dep": "College of Computer;Department of CSE", + "aff_unique_url": "http://www.nudt.edu.cn/;https://www.ust.hk", + "aff_unique_abbr": "NUDT;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.203", + "title": "Empowering Dual-Encoder with Query Generator for Cross-Lingual Dense Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In monolingual dense retrieval, lots of works focus on how to distill knowledge from cross-encoder re-ranker to dual-encoder retriever and these methods achieve better performance due to the effectiveness of cross-encoder re-ranker. However, we find that the performance of the cross-encoder re-ranker is heavily influenced by the number of training samples and the quality of negative samples, which is hard to obtain in the cross-lingual setting. In this paper, we propose to use a query generator as the teacher in the cross-lingual setting, which is less dependent on enough training samples and high-quality negative samples. In addition to traditional knowledge distillation, we further propose a novel enhancement method, which uses the query generator to help the dual-encoder align queries from different languages, but does not need any additional parallel sentences. 
The experimental results show that our method outperforms the state-of-the-art methods on two benchmark datasets.", + "author": "Houxing Ren; Linjun Shou; Ning Wu; Ming Gong; Daxin Jiang", + "authorids": "/h/houxing-ren/; /l/linjun-shou/; /n/ning-wu/; /m/ming-gong/; /d/daxin-jiang/", + "bibtex": "@inproceedings{ren-etal-2022-empowering,\n title = \"Empowering Dual-Encoder with Query Generator for Cross-Lingual Dense Retrieval\",\n author = \"Ren, Houxing and\n Shou, Linjun and\n Wu, Ning and\n Gong, Ming and\n Jiang, Daxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.203/\",\n doi = \"10.18653/v1/2022.emnlp-main.203\",\n pages = \"3107--3121\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.203.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.203/", + "pdf_size": 635616, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8365062453454421609&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science and Engineering, Beihang University; Microsoft STC Asia; Microsoft STC Asia; Microsoft STC Asia; Microsoft STC Asia", + "aff_domain": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "buaa.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Beihang University;Microsoft", + "aff_unique_dep": "School of Computer Science and Engineering;STC", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.microsoft.com", + "aff_unique_abbr": "BUAA;MS", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": 
";Asia", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.650", + "title": "Empowering Language Models with Knowledge Graph Reasoning for Open-Domain Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Answering open-domain questions requires world knowledge about in-context entities. As pre-trained Language Models (LMs) lack the power to store all required knowledge, external knowledge sources, such as knowledge graphs, are often used to augment LMs. In this work, we propose knOwledge REasOning empowered Language Model(OREO-LM), which consists of a novel Knowledge Interaction Layer that can be flexibly plugged into existing Transformer-based LMs to interact with a differentiable Knowledge Graph Reasoning module collaboratively. In this way, LM guides KG to walk towards the desired answer, while the retrieved knowledge improves LM.By adopting OREO-LM to RoBERTa and T5, we show significant performance gain, achieving state-of-art results in the Closed-Book setting. The performance enhancement is mainly from the KG reasoning\u2019s capacity to infer missing relational facts. 
In addition, OREO-LM provides reasoning paths as rationales to interpret the model\u2019s decision.", + "author": "Ziniu Hu; Yichong Xu; Wenhao Yu; Shuohang Wang; Ziyi Yang; Chenguang Zhu; Kai-Wei Chang; Yizhou Sun", + "authorids": "/z/ziniu-hu/; /y/yichong-xu/; /w/wenhao-yu/; /s/shuohang-wang/; /z/ziyi-yang/; /c/chenguang-zhu/; /k/kai-wei-chang/; /y/yizhou-sun/", + "bibtex": "@inproceedings{hu-etal-2022-empowering,\n title = \"Empowering Language Models with Knowledge Graph Reasoning for Open-Domain Question Answering\",\n author = \"Hu, Ziniu and\n Xu, Yichong and\n Yu, Wenhao and\n Wang, Shuohang and\n Yang, Ziyi and\n Zhu, Chenguang and\n Chang, Kai-Wei and\n Sun, Yizhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.650/\",\n doi = \"10.18653/v1/2022.emnlp-main.650\",\n pages = \"9562--9581\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.650.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.650/", + "pdf_size": 8271561, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7068596698803815738&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.525", + "title": "Empowering the Fact-checkers! Automatic Identification of Claim Spans on Twitter", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The widespread diffusion of medical and political claims in the wake of COVID-19 has led to a voluminous rise in misinformation and fake news. 
The current vogue is to employ manual fact-checkers to efficiently classify and verify such data to combat this avalanche of claim-ridden misinformation. However, the rate of information dissemination is such that it vastly outpaces the fact-checkers\u2019 strength. Therefore, to aid manual fact-checkers in eliminating the superfluous content, it becomes imperative to automatically identify and extract the snippets of claim-worthy (mis)information present in a post. In this work, we introduce the novel task of Claim Span Identification (CSI). We propose CURT, a large-scale Twitter corpus with token-level claim spans on more than 7.5k tweets. Furthermore, along with the standard token classification baselines, we benchmark our dataset with DABERTa, an adapter-based variation of RoBERTa. The experimental results attest that DABERTa outperforms the baseline systems across several evaluation metrics, improving by about 1.5 points. We also report detailed error analysis to validate the model\u2019s performance along with the ablation studies. Lastly, we release our comprehensive span annotation guidelines for public use.", + "author": "Megha Sundriyal; Atharva Kulkarni; Vaibhav Pulastya; Md. Shad Akhtar; Tanmoy Chakraborty", + "authorids": "/m/megha-sundriyal/; /a/atharva-kulkarni/; /v/vaibhav-pulastya/; /m/md-shad-akhtar/; /t/tanmoy-chakraborty/", + "bibtex": "@inproceedings{sundriyal-etal-2022-empowering,\n title = \"Empowering the Fact-checkers! Automatic Identification of Claim Spans on {T}witter\",\n author = \"Sundriyal, Megha and\n Kulkarni, Atharva and\n Pulastya, Vaibhav and\n Akhtar, Md. 
Shad and\n Chakraborty, Tanmoy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.525/\",\n doi = \"10.18653/v1/2022.emnlp-main.525\",\n pages = \"7701--7715\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.525.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.525/", + "pdf_size": 565676, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6649655271658578760&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "IIIT Delhi, India; IIIT Delhi, India; IIIT Delhi, India; IIIT Delhi, India; IIT Delhi, India", + "aff_domain": "iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;ee.iitd.ac.in", + "email": "iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;iiitd.ac.in;ee.iitd.ac.in", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "IIIT Delhi;Indian Institute of Technology Delhi", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iiitdelhi.ac.in;https://www.iitd.ac.in", + "aff_unique_abbr": "IIITD;IITD", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Delhi", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.findings-emnlp.359", + "title": "EnDex: Evaluation of Dialogue Engagingness at Scale", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose EnDex, the first human-reaction based model to evaluate dialogue engagingness. EnDex is trained on 80k Reddit-based Engagement Dataset (RED) curated using a novel distant-supervision framework. 
Engagingness is a key measure that captures high-level quality of AI dialogue systems and closely reflects actual user experience. However, data shortage, plus the abstract and extensive definition of engagingness makes it challenging to develop an automatic metric. Our work departs from mainstream approaches that use synthetic negative examples to train binary classifiers, and instead, proposes a solution using distant-supervision from human-reaction feedback. To support the soundness of our EnDex metric, we offer a theoretical foundation for engagement, an extensive ablation study, and empirical evidence of high correlation on five engagingness related datasets. We will release code, off-the-shelf EnDex model, and a large-scale dataset upon paper publication to facilitate future research.", + "author": "Guangxuan Xu; Ruibo Liu; Fabrice Harel-Canada; Nischal Reddy Chandra; Nanyun Peng", + "authorids": "/g/guangxuan-xu/; /r/ruibo-liu/; /f/fabrice-harel-canada/; /n/nischal-reddy-chandra/; /n/nanyun-peng/", + "bibtex": "@inproceedings{xu-etal-2022-endex,\n title = \"{E}n{D}ex: Evaluation of Dialogue Engagingness at Scale\",\n author = \"Xu, Guangxuan and\n Liu, Ruibo and\n Harel-Canada, Fabrice and\n Chandra, Nischal Reddy and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.359/\",\n doi = \"10.18653/v1/2022.findings-emnlp.359\",\n pages = \"4884--4893\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.359.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.359/", + "pdf_size": 723481, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6171846506698296668&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 4, + "aff": "University of California, Los Angeles; Dartmouth College; University of California, Los Angeles; University of California, Los Angeles; University of California, Los Angeles", + "aff_domain": "cs.ucla.edu; ;cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu; ;cs.ucla.edu;cs.ucla.edu;cs.ucla.edu", + "github": "https://github.com/gxxu-ml/EnDex", + "project": "https://www.amazon.science/alexa-prize", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "University of California, Los Angeles;Dartmouth College", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucla.edu;https://www.dartmouth.edu", + "aff_unique_abbr": "UCLA;Dartmouth", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.778", + "title": "End-to-End Neural Discourse Deixis Resolution in Dialogue", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We adapt Lee et al.\u2019s (2018) span-based entity coreference model to the task of end-to-end discourse deixis resolution in dialogue, specifically by proposing extensions to their model that exploit task-specific characteristics. 
The resulting model, dd-utt, achieves state-of-the-art results on the four datasets in the CODI-CRAC 2021 shared task.", + "author": "Shengjie Li; Vincent Ng", + "authorids": "/s/shengjie-li/; /v/vincent-ng/", + "bibtex": "@inproceedings{li-ng-2022-end,\n title = \"End-to-End Neural Discourse Deixis Resolution in Dialogue\",\n author = \"Li, Shengjie and\n Ng, Vincent\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.778/\",\n doi = \"10.18653/v1/2022.emnlp-main.778\",\n pages = \"11322--11334\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.778.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.778/", + "pdf_size": 288511, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:861tFWW5TukJ:scholar.google.com/&scioq=End-to-End+Neural+Discourse+Deixis+Resolution+in+Dialogue&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff": "Human Language Technology Research Institute, University of Texas at Dallas; Human Language Technology Research Institute, University of Texas at Dallas", + "aff_domain": "hlt.utdallas.edu;hlt.utdallas.edu", + "email": "hlt.utdallas.edu;hlt.utdallas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Texas at Dallas", + "aff_unique_dep": "Human Language Technology Research Institute", + "aff_unique_url": "https://www.utdallas.edu", + "aff_unique_abbr": "UT Dallas", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Dallas", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.59", + "title": "End-to-End Speech to Intent Prediction to 
improve E-commerce Customer Support Voicebot in Hindi and English", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Automation of on-call customer support relies heavily on accurate and efficient speech-to-intent (S2I) systems. Building such systems using multi-component pipelines can pose various challenges because they require large annotated datasets, have higher latency, and have complex deployment. These pipelines are also prone to compounding errors. To overcome these challenges, we discuss an end-to-end (E2E) S2I model for customer support voicebot task in a bilingual setting. We show how we can solve E2E intent classification by leveraging a pre-trained automatic speech recognition (ASR) model with slight modification and fine-tuning on small annotated datasets. Experimental results show that our best E2E model outperforms a conventional pipeline by a relative ~27% on the F1 score.", + "author": "Abhinav Goyal; Anupam Singh; Nikesh Garera", + "authorids": "/a/abhinav-goyal/; /a/anupam-singh/; /n/nikesh-garera/", + "bibtex": "@inproceedings{goyal-etal-2022-end,\n title = \"End-to-End Speech to Intent Prediction to improve {E}-commerce Customer Support Voicebot in {H}indi and {E}nglish\",\n author = \"Goyal, Abhinav and\n Singh, Anupam and\n Garera, Nikesh\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.59/\",\n doi = \"10.18653/v1/2022.emnlp-industry.59\",\n pages = \"579--586\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.59.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.59/", + "pdf_size": 299930, + "gs_citation": 5, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=12575189811978931299&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Flipkart; Flipkart; Flipkart", + "aff_domain": "flipkart.com;flipkart.com;flipkart.com", + "email": "flipkart.com;flipkart.com;flipkart.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Flipkart", + "aff_unique_dep": "", + "aff_unique_url": "https://www.flipkart.com", + "aff_unique_abbr": "Flipkart", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.742", + "title": "End-to-End Unsupervised Vision-and-Language Pre-training with Referring Expression Matching", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently there has been an emerging interest in unsupervised vision-and-language pre-training (VLP) that learns multimodal representations without parallel image-caption data. These pioneering works significantly reduce the cost of VLP on data collection and achieve promising results compared to supervised VLP. However, existing unsupervised VLP methods take as input pre-extracted region-based visual features from external object detectors, which both limits flexibility and reduces computational efficiency. In this paper, we explore end-to-end unsupervised VLP with a vision encoder to directly encode images. The vision encoder is pre-trained on image-only data and jointly optimized during multimodal pre-training. To further enhance the learned cross-modal features, we propose a novel pre-training task that predicts which patches contain an object referred to in natural language from the encoded visual features. 
Extensive experiments on four vision-and-language tasks show that our approach outperforms previous unsupervised VLP methods and obtains new state-of-the-art results.", + "author": "Chi Chen; Peng Li; Maosong Sun; Yang Liu", + "authorids": "/c/chi-chen/; /p/peng-li/; /m/maosong-sun/; /y/yang-liu/", + "bibtex": "@inproceedings{chen-etal-2022-end,\n title = \"End-to-End Unsupervised Vision-and-Language Pre-training with Referring Expression Matching\",\n author = \"Chen, Chi and\n Li, Peng and\n Sun, Maosong and\n Liu, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.742/\",\n doi = \"10.18653/v1/2022.emnlp-main.742\",\n pages = \"10799--10810\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.742.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.742/", + "pdf_size": 9162573, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4187807599993137924&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/THUNLP-MT/E2E-UVLP", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.621", + "title": "English Contrastive Learning Can Learn Universal Cross-lingual Sentence Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Universal cross-lingual sentence embeddings map semantically similar cross-lingual sentences into a shared embedding space. Aligning cross-lingual sentence embeddings usually requires supervised cross-lingual parallel sentences. 
In this work, we propose mSimCSE, which extends SimCSE to multilingual settings and reveal that contrastive learning on English data can surprisingly learn high-quality universal cross-lingual sentence embeddings without any parallel data.In unsupervised and weakly supervised settings, mSimCSE significantly improves previous sentence embedding methods on cross-lingual retrieval and multilingual STS tasks. The performance of unsupervised mSimCSE is comparable to fully supervised methods in retrieving low-resource languages and multilingual STS.The performance can be further enhanced when cross-lingual NLI data is available.", + "author": "Yaushian Wang; Ashley Wu; Graham Neubig", + "authorids": "/y/yaushian-wang/; /a/ashley-wu/; /g/graham-neubig/", + "bibtex": "@inproceedings{wang-etal-2022-english,\n title = \"{E}nglish Contrastive Learning Can Learn Universal Cross-lingual Sentence Embeddings\",\n author = \"Wang, Yaushian and\n Wu, Ashley and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.621/\",\n doi = \"10.18653/v1/2022.emnlp-main.621\",\n pages = \"9122--9133\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.621.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.621/", + "pdf_size": 374197, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8392401945315434508&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 3, + "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", + "aff_domain": "gmail.com;andrew.cmu.edu;cs.cmu.edu", + "email": "gmail.com;andrew.cmu.edu;cs.cmu.edu", + "github": "https://github.com/yaushian/mSimCSE", + 
"project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.334", + "title": "Enhancing Automatic Readability Assessment with Pre-training and Soft Labels for Ordinal Regression", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The readability assessment task aims to assign a difficulty grade to a text. While neural models have recently demonstrated impressive performance, most do not exploit the ordinal nature of the difficulty grades, and make little effort for model initialization to facilitate fine-tuning. We address these limitations with soft labels for ordinal regression, and with model pre-training through prediction of pairwise relative text difficulty. We incorporate these two components into a model based on hierarchical attention networks, and evaluate its performance on both English and Chinese datasets. 
Experimental results show that our proposed model outperforms competitive neural models and statistical classifiers on most datasets.", + "author": "Jinshan Zeng; Yudong Xie; Xianglong Yu; John Lee; Ding-Xuan Zhou", + "authorids": "/j/jinshan-zeng/; /y/yudong-xie/; /x/xianglong-yu/; /j/john-s-y-lee/; /d/ding-xuan-zhou/", + "bibtex": "@inproceedings{zeng-etal-2022-enhancing,\n title = \"Enhancing Automatic Readability Assessment with Pre-training and Soft Labels for Ordinal Regression\",\n author = \"Zeng, Jinshan and\n Xie, Yudong and\n Yu, Xianglong and\n Lee, John and\n Zhou, Ding-Xuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.334/\",\n doi = \"10.18653/v1/2022.findings-emnlp.334\",\n pages = \"4557--4568\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.334.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.334/", + "pdf_size": 835029, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15718194771658644561&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 2, + "aff": "School of Computer and Information Science, Jiangxi Normal University; School of Computer and Information Science, Jiangxi Normal University; School of Computer and Information Science, Jiangxi Normal University; Department of Linguistics and Translation, City University of Hong Kong; School of Mathematics and Statistics, University of Sydney", + "aff_domain": "jxnu.edu.cn;gmail.com;jxnu.edu.cn;cityu.edu.hk;sydney.edu.au", + "email": "jxnu.edu.cn;gmail.com;jxnu.edu.cn;cityu.edu.hk;sydney.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "Jiangxi Normal 
University;City University of Hong Kong;University of Sydney", + "aff_unique_dep": "School of Computer and Information Science;Department of Linguistics and Translation;School of Mathematics and Statistics", + "aff_unique_url": "http://www.jxnu.edu.cn;https://www.cityu.edu.hk;https://www.sydney.edu.au", + "aff_unique_abbr": ";CityU;USYD", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Hong Kong;Sydney", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.543", + "title": "Enhancing Joint Multiple Intent Detection and Slot Filling with Global Intent-Slot Co-occurrence", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-intent detection and slot filling joint model attracts more and more attention since it can handle multi-intent utterances, which is closer to complex real-world scenarios. Most existing joint models rely entirely on the training procedure to obtain the implicit correlation between intents and slots. However, they ignore the fact that leveraging the rich global knowledge in the corpus can determine the intuitive and explicit correlation between intents and slots. In this paper, we aim to make full use of the statistical co-occurrence frequency between intents and slots as prior knowledge to enhance joint multiple intent detection and slot filling. To be specific, an intent-slot co-occurrence graph is constructed based on the entire training corpus to globally discover correlation between intents and slots. Based on the global intent-slot co-occurrence, we propose a novel graph neural network to model the interaction between the two subtasks. 
Experimental results on two public multi-intent datasets demonstrate that our approach outperforms the state-of-the-art models.", + "author": "Mengxiao Song; Bowen Yu; Li Quangang; Wang Yubin; Tingwen Liu; Hongbo Xu", + "authorids": "/m/mengxiao-song/; /b/bowen-yu/; /l/li-quangang/; /w/wang-yubin/; /t/tingwen-liu/; /h/hongbo-xu/", + "bibtex": "@inproceedings{song-etal-2022-enhancing,\n title = \"Enhancing Joint Multiple Intent Detection and Slot Filling with Global Intent-Slot Co-occurrence\",\n author = \"Song, Mengxiao and\n Yu, Bowen and\n Quangang, Li and\n Yubin, Wang and\n Liu, Tingwen and\n Xu, Hongbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.543/\",\n doi = \"10.18653/v1/2022.emnlp-main.543\",\n pages = \"7967--7977\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.543.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.543/", + "pdf_size": 822463, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8633784322822536117&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences. Beijing, China; School of Cyber Security, University of Chinese Academy of Sciences. Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences. Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences. Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences. Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences. 
Beijing, China", + "aff_domain": "iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.462", + "title": "Enhancing Multilingual Language Model with Massive Multilingual Knowledge Triples", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge-enhanced language representation learning has shown promising results across various knowledge-intensive NLP tasks. However, prior methods are limited in efficient utilization of multilingual knowledge graph (KG) data for language model (LM) pretraining. They often train LMs with KGs in indirect ways, relying on extra entity/relation embeddings to facilitate knowledge injection. In this work, we explore methods to make better use of the multilingual annotation and language agnostic property of KG triples, and present novel knowledge based multilingual language models (KMLMs) trained directly on the knowledge triples. We first generate a large amount of multilingual synthetic sentences using the Wikidata KG triples. Then based on the intra- and inter-sentence structures of the generated data, we design pretraining tasks to enable the LMs to not only memorize the factual knowledge but also learn useful logical patterns. 
Our pretrained KMLMs demonstrate significant performance improvements on a wide range of knowledge-intensive cross-lingual tasks, including named entity recognition (NER), factual knowledge retrieval, relation classification, and a newly designed logical reasoning task.", + "author": "Linlin Liu; Xin Li; Ruidan He; Lidong Bing; Shafiq Joty; Luo Si", + "authorids": "/l/linlin-liu/; /x/xin-li/; /r/ruidan-he/; /l/lidong-bing/; /s/shafiq-joty/; /l/luo-si/", + "bibtex": "@inproceedings{liu-etal-2022-enhancing-multilingual,\n title = \"Enhancing Multilingual Language Model with Massive Multilingual Knowledge Triples\",\n author = \"Liu, Linlin and\n Li, Xin and\n He, Ruidan and\n Bing, Lidong and\n Joty, Shafiq and\n Si, Luo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.462/\",\n doi = \"10.18653/v1/2022.emnlp-main.462\",\n pages = \"6878--6890\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.462.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.462/", + "pdf_size": 383837, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2966061535324026942&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 3, + "aff": "DAMO Academy, Alibaba Group + Nanyang Technological University, Singapore; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; Nanyang Technological University, Singapore + Salesforce Research; DAMO Academy, Alibaba Group", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;ntu.edu.sg;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;ntu.edu.sg;alibaba-inc.com", + "github": 
"https://github.com/ntunlp/kmlm.git", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;0;1+2;0", + "aff_unique_norm": "Alibaba Group;Nanyang Technological University;Salesforce", + "aff_unique_dep": "DAMO Academy;;Salesforce Research", + "aff_unique_url": "https://www.alibaba-group.com;https://www.ntu.edu.sg;https://research.salesforce.com", + "aff_unique_abbr": "Alibaba;NTU;Salesforce", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;0;1+2;0", + "aff_country_unique": "China;Singapore;United States" + }, + { + "id": "2022.findings-emnlp.55", + "title": "Enhancing Out-of-Distribution Detection in Natural Language Understanding via Implicit Layer Ensemble", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Out-of-distribution (OOD) detection aims to discern outliers from the intended data distribution, which is crucial to maintaining high reliability and a good user experience.Most recent studies in OOD detection utilize the information from a single representation that resides in the penultimate layer to determine whether the input is anomalous or not.Although such a method is straightforward, the potential of diverse information in the intermediate layers is overlooked.In this paper, we propose a novel framework based on contrastive learning that encourages intermediate features to learn layer-specialized representations and assembles them implicitly into a single representation to absorb rich information in the pre-trained language model. 
Extensive experiments in various intent classification and OOD datasets demonstrate that our approach is significantly more effective than other works.", + "author": "Hyunsoo Cho; Choonghyun Park; Jaewook Kang; Kang Min Yoo; Taeuk Kim; Sang-goo Lee", + "authorids": "/h/hyunsoo-cho/; /c/choonghyun-park/; /j/jaewook-kang/; /k/kang-min-yoo/; /t/taeuk-kim/; /s/sang-goo-lee/", + "bibtex": "@inproceedings{cho-etal-2022-enhancing,\n title = \"Enhancing Out-of-Distribution Detection in Natural Language Understanding via Implicit Layer Ensemble\",\n author = \"Cho, Hyunsoo and\n Park, Choonghyun and\n Kang, Jaewook and\n Yoo, Kang Min and\n Kim, Taeuk and\n Lee, Sang-goo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.55/\",\n doi = \"10.18653/v1/2022.findings-emnlp.55\",\n pages = \"783--798\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.55.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.55/", + "pdf_size": 815422, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1079469505072003828&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Seoul National University\u2020; Seoul National University\u2020; NA VER AI Lab\u266e; NA VER AI Lab\u2021+NA VER CLOV A\u266e\u2020; Hanyang University\u00a7\u2217; Seoul National University\u2020", + "aff_domain": "europa.snu.ac.kr;europa.snu.ac.kr;navercorp.com;navercorp.com;hanyang.ac.kr;europa.snu.ac.kr", + "email": "europa.snu.ac.kr;europa.snu.ac.kr;navercorp.com;navercorp.com;hanyang.ac.kr;europa.snu.ac.kr", + "github": "https://github.com/HyunsooCho77/LaCL-official", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;3;0", + 
"aff_unique_norm": "Seoul National University;NAVER AI Lab;;Hanyang University", + "aff_unique_dep": ";AI Lab;;", + "aff_unique_url": "https://www.snu.ac.kr;https://www.naver.com;;https://www.hanyang.ac.kr", + "aff_unique_abbr": "SNU;NAVER AI;;HYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "2022.emnlp-main.115", + "title": "Enhancing Self-Consistency and Performance of Pre-Trained Language Models through Natural Language Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While large pre-trained language models are powerful, their predictions often lack logical consistency across test inputs. For example, a state-of-the-art Macaw question-answering (QA) model answers Yes to Is a sparrow a bird? and Does a bird have feet? but answers No to Does a sparrow have feet?. To address this failure mode, we propose a framework, Consistency Correction through Relation Detection, or ConCoRD, for boosting the consistency and accuracy of pre-trained NLP models using pre-trained natural language inference (NLI) models without fine-tuning or re-training. Given a batch of test inputs, ConCoRD samples several candidate outputs for each input and instantiates a factor graph that accounts for both the model\u2019s belief about the likelihood of each answer choice in isolation and the NLI model\u2019s beliefs about pair-wise answer choice compatibility. We show that a weighted MaxSAT solver can efficiently compute high-quality answer choices under this factor graph, improving over the raw model\u2019s predictions. Our experiments demonstrate that ConCoRD consistently boosts accuracy and consistency of off-the-shelf closed-book QA and VQA models using off-the-shelf NLI models, notably increasing accuracy of LXMERT on ConVQA by 5% absolute. 
See the project website (https://ericmitchell.ai/emnlp-2022-concord/) for code and data.", + "author": "Eric Mitchell; Joseph Noh; Siyan Li; Will Armstrong; Ananth Agarwal; Patrick Liu; Chelsea Finn; Christopher Manning", + "authorids": "/e/eric-mitchell/; /j/joseph-noh/; /s/siyan-li/; /w/will-armstrong/; /a/ananth-agarwal/; /p/patrick-liu/; /c/chelsea-finn/; /c/christopher-d-manning/", + "bibtex": "@inproceedings{mitchell-etal-2022-enhancing,\n title = \"Enhancing Self-Consistency and Performance of Pre-Trained Language Models through Natural Language Inference\",\n author = \"Mitchell, Eric and\n Noh, Joseph and\n Li, Siyan and\n Armstrong, Will and\n Agarwal, Ananth and\n Liu, Patrick and\n Finn, Chelsea and\n Manning, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.115/\",\n doi = \"10.18653/v1/2022.emnlp-main.115\",\n pages = \"1754--1768\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.115.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.115/", + "pdf_size": 2439229, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2068112158537998084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Stanford University; Stanford University; Stanford University; Stanford University; Stanford University; Stanford University; Stanford University; Stanford University", + "aff_domain": "cs.stanford.edu; ; ; ; ; ; ; ", + "email": "cs.stanford.edu; ; ; ; ; ; ; ", + "github": "", + "project": "https://ericmitchell.ai/emnlp-2022-concord/", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + 
"aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.537", + "title": "Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large transformer models can highly improve Answer Sentence Selection (AS2) tasks, but their high computational costs prevent their use in many real-world applications. In this paper, we explore the following research question: How can we make the AS2 models more accurate without significantly increasing their model complexity? To address the question, we propose a Multiple Heads Student architecture (named CERBERUS), an efficient neural network designed to distill an ensemble of large transformers into a single smaller model. CERBERUS consists of two components: a stack of transformer layers that is used to encode inputs, and a set of ranking heads; unlike traditional distillation technique, each of them is trained by distilling a different large transformer architecture in a way that preserves the diversity of the ensemble members. The resulting model captures the knowledge of heterogeneous transformer models by using just a few extra parameters. We show the effectiveness of CERBERUS on three English datasets for AS2; our proposed approach outperforms all single-model distillations we consider, rivaling the state-of-the-art large AS2 models that have 2.7\u00d7 more parameters and run 2.5\u00d7 slower. 
Code for our model is available at https://github.com/amazon-research/wqa-cerberus.", + "author": "Yoshitomo Matsubara; Luca Soldaini; Eric Lind; Alessandro Moschitti", + "authorids": "/y/yoshitomo-matsubara/; /l/luca-soldaini/; /e/eric-lind/; /a/alessandro-moschitti/", + "bibtex": "@inproceedings{matsubara-etal-2022-ensemble,\n title = \"Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems\",\n author = \"Matsubara, Yoshitomo and\n Soldaini, Luca and\n Lind, Eric and\n Moschitti, Alessandro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.537/\",\n doi = \"10.18653/v1/2022.findings-emnlp.537\",\n pages = \"7259--7272\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.537.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.537/", + "pdf_size": 655477, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7719841477466099500&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "University of California, Irvine + Amazon Alexa AI; Allen Institute for AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "uci.edu;allenai.org;amazon.com;amazon.com", + "email": "uci.edu;allenai.org;amazon.com;amazon.com", + "github": "https://github.com/amazon-research/wqa-cerberus", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;1;1", + "aff_unique_norm": "University of California, Irvine;Amazon;Allen Institute for AI", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.uci.edu;https://www.amazon.com;https://allenai.org", + "aff_unique_abbr": "UCI;Amazon;AI2", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Irvine;", 
+ "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.134", + "title": "Entailer: Answering Questions with Faithful and Truthful Chains of Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Our goal is a question-answering (QA) system that can show how its answers are implied by its own internal beliefs via a systematic chain of reasoning. Such a capability would allow better understanding of why a model produced the answer it did. Our approach is to recursively combine a trained backward-chainingmodel, capable of generating a set of premises entailing an answer hypothesis, with a verifier that checks that the model itself believes those premises (and the entailment itself) through self-querying. To our knowledge, this is the first system to generate multistep chains that are both faithful (the answer follows from the reasoning) and truthful (the chain reflects the system\u2019s own internal beliefs). In evaluation using two different datasets, users judge that a majority (70%+) of generated chains clearly show how an answer follows from a set of facts - substantially better than a high-performance baseline - while preserving answer accuracy. 
By materializing model beliefs that systematically support an answer, new opportunities arise for understanding the model\u2019s system of belief, and diagnosing and correcting its misunderstandings when an answer is wrong.", + "author": "Oyvind Tafjord; Bhavana Dalvi Mishra; Peter Clark", + "authorids": "/o/oyvind-tafjord/; /b/bhavana-dalvi/; /p/peter-clark/", + "bibtex": "@inproceedings{tafjord-etal-2022-entailer,\n title = \"Entailer: Answering Questions with Faithful and Truthful Chains of Reasoning\",\n author = \"Tafjord, Oyvind and\n Dalvi Mishra, Bhavana and\n Clark, Peter\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.134/\",\n doi = \"10.18653/v1/2022.emnlp-main.134\",\n pages = \"2078--2093\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.134.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.134/", + "pdf_size": 1593830, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=650284566009280341&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA", + "aff_domain": "allenai.org;allenai.org;allenai.org", + "email": "allenai.org;allenai.org;allenai.org", + "github": "", + "project": "https://allenai.org/data/entailer2078", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Allen Institute for AI", + "aff_unique_dep": "", + "aff_unique_url": "https://allenai.org", + "aff_unique_abbr": "AI2", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { 
+ "id": "2022.findings-emnlp.472", + "title": "Entity Embedding Completion for Wide-Coverage Entity Disambiguation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Entity disambiguation (ED) is typically solved by learning to classify a given mention into one of the entities in the model\u2019s entity vocabulary by referring to their embeddings. However, this approach cannot address mentions of entities that are not covered by the entity vocabulary. Aiming to enhance the applicability of ED models, we propose a method of extending a state-of-the-art ED model by dynamically computing embeddings of out-of-vocabulary entities. Specifically, our method computes embeddings from entity descriptions and mention contexts. Experiments with standard benchmark datasets show that the extended model performs comparable to or better than existing models whose entity embeddings are trained for all candidate entities as well as embedding-free models. We release our source code and model checkpoints at https://github.com/studio-ousia/steel.", + "author": "Daisuke Oba; Ikuya Yamada; Naoki Yoshinaga; Masashi Toyoda", + "authorids": "/d/daisuke-oba/; /i/ikuya-yamada/; /n/naoki-yoshinaga/; /m/masashi-toyoda/", + "bibtex": "@inproceedings{oba-etal-2022-entity,\n title = \"Entity Embedding Completion for Wide-Coverage Entity Disambiguation\",\n author = \"Oba, Daisuke and\n Yamada, Ikuya and\n Yoshinaga, Naoki and\n Toyoda, Masashi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.472/\",\n doi = \"10.18653/v1/2022.findings-emnlp.472\",\n pages = \"6333--6344\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.472.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.472/", + "pdf_size": 345473, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14181066485435046080&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "The University of Tokyo; Studio Ousia + RIKEN AIP; Institute of Industrial Science, The University of Tokyo; Institute of Industrial Science, The University of Tokyo", + "aff_domain": "tkl.iis.u-tokyo.ac.jp;ousia.jp;iis.u-tokyo.ac.jp;iis.u-tokyo.ac.jp", + "email": "tkl.iis.u-tokyo.ac.jp;ousia.jp;iis.u-tokyo.ac.jp;iis.u-tokyo.ac.jp", + "github": "https://github.com/studio-ousia/steel", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;3", + "aff_unique_norm": "University of Tokyo;Studio Ousia;RIKEN;The University of Tokyo", + "aff_unique_dep": ";;Advanced Institute for Computational Science;Institute of Industrial Science", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.studioousia.com;https://www.aip.riken.jp;https://www.iis.u-tokyo.ac.jp/en/", + "aff_unique_abbr": "UTokyo;;RIKEN AIP;UTokyo", + "aff_campus_unique_index": ";1;1", + "aff_campus_unique": ";Tokyo", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.61", + "title": "Entity Extraction in Low Resource Domains with Selective Pre-training of Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based language models trained on large natural language corpora have been very useful in downstream entity extraction tasks. However, they often result in poor performances when applied to domains that are different from those they are pretrained on. Continued pretraining using unlabeled data from target domains can help improve the performances of these language models on the downstream tasks. 
However, using all of the available unlabeled data for pretraining can be time-intensive; also, it can be detrimental to the performance of the downstream tasks, if the unlabeled data is not aligned with the data distribution for the target tasks. Previous works employed external supervision in the form of ontologies for selecting appropriate data samples for pretraining, but external supervision can be quite hard to obtain in low-resource domains. In this paper, we introduce effective ways to select data from unlabeled corpora of target domains for language model pretraining to improve the performances in target entity extraction tasks. Our data selection strategies do not require any external supervision. We conduct extensive experiments for the task of named entity recognition (NER) on seven different domains and show that language models pretrained on target domain unlabeled data obtained using our data selection strategies achieve better performances compared to those using data selection strategies in previous works that use external supervision. 
We also show that these pretrained language models using our data selection strategies outperform those pretrained on all of the available unlabeled target domain data.", + "author": "Aniruddha Mahapatra; Sharmila Reddy Nangi; Aparna Garimella; Anandhavelu N", + "authorids": "/a/aniruddha-mahapatra/; /s/sharmila-reddy-nangi/; /a/aparna-garimella/; /a/anandhavelu-n/", + "bibtex": "@inproceedings{mahapatra-etal-2022-entity,\n title = \"Entity Extraction in Low Resource Domains with Selective Pre-training of Large Language Models\",\n author = \"Mahapatra, Aniruddha and\n Nangi, Sharmila Reddy and\n Garimella, Aparna and\n N, Anandhavelu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.61/\",\n doi = \"10.18653/v1/2022.emnlp-main.61\",\n pages = \"942--951\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.61.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.61/", + "pdf_size": 1211738, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10994649785359184259&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 2, + "aff": "Carnegie Mellon University, USA+Adobe Research, India; Stanford University, USA+Adobe Research, India; Adobe Research, India; Adobe Research, India", + "aff_domain": "andrew.cmu.edu;stanford.edu;adobe.com;adobe.com", + "email": "andrew.cmu.edu;stanford.edu;adobe.com;adobe.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+1;1;1", + "aff_unique_norm": "Carnegie Mellon University;Adobe Research;Stanford University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cmu.edu;https://research.adobe.com;https://www.stanford.edu", + 
"aff_unique_abbr": "CMU;Adobe;Stanford", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0+1;0+1;1;1", + "aff_country_unique": "United States;India" + }, + { + "id": "2022.emnlp-main.551", + "title": "Entity-Focused Dense Passage Retrieval for Outside-Knowledge Visual Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most Outside-Knowledge Visual Question Answering (OK-VQA) systems employ a two-stage framework that first retrieves external knowledge given the visual question and then predicts the answer based on the retrieved content. However, the retrieved knowledge is often inadequate. Retrievals are frequently too general and fail to cover specific knowledge needed to answer the question. Also, the naturally available supervision (whether the passage contains the correct answer) is weak and does not guarantee question relevancy. To address these issues, we propose an Entity-Focused Retrieval (EnFoRe) model that provides stronger supervision during training and recognizes question-relevant entities to help retrieve more specific knowledge. Experiments show that our EnFoRe model achieves superior retrieval performance on OK-VQA, the currently largest outside-knowledge VQA dataset. 
We also combine the retrieved knowledge with state-of-the-art VQA models, and achieve a new state-of-the-art performance on OK-VQA.", + "author": "Jialin Wu; Raymond Mooney", + "authorids": "/j/jialin-wu/; /r/raymond-mooney/", + "bibtex": "@inproceedings{wu-mooney-2022-entity,\n title = \"Entity-Focused Dense Passage Retrieval for Outside-Knowledge Visual Question Answering\",\n author = \"Wu, Jialin and\n Mooney, Raymond\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.551/\",\n doi = \"10.18653/v1/2022.emnlp-main.551\",\n pages = \"8061--8072\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.551.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.551/", + "pdf_size": 8122621, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12698811284099842395&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin", + "aff_domain": "utexas.edu;cs.utexas.edu", + "email": "utexas.edu;cs.utexas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.671", + "title": "Entity-centered Cross-document Relation Extraction", + "track": "main", + "status": "Main", + "award": 
false, + "abstract": "Relation Extraction (RE) is a fundamental task of information extraction, which has attracted a large amount of research attention. Previous studies focus on extracting the relations within a sentence or document, while currently researchers begin to explore cross-document RE. However, current cross-document RE methods directly utilize text snippets surrounding target entities in multiple given documents, which brings considerable noisy and non-relevant sentences. Moreover, they utilize all the text paths in a document bag in a coarse-grained way, without considering the connections between these text paths.In this paper, we aim to address both of these shortages and push the state-of-the-art for cross-document RE. First, we focus on input construction for our RE model and propose an entity-based document-context filter to retain useful information in the given documents by using the bridge entities in the text paths. Second, we propose a cross-document RE model based on cross-path entity relation attention, which allow the entity relations across text paths to interact with each other. We compare our cross-document RE method with the state-of-the-art methods in the dataset CodRED. 
Our method outperforms them by at least 10% in F1, thus demonstrating its effectiveness.", + "author": "Fengqi Wang; Fei Li; Hao Fei; Jingye Li; Shengqiong Wu; Fangfang Su; Wenxuan Shi; Donghong Ji; Bo Cai", + "authorids": "/f/fengqi-wang/; /f/fei-li/; /h/hao-fei/; /j/jingye-li/; /s/shengqiong-wu/; /f/fangfang-su/; /w/wenxuan-shi/; /d/donghong-ji/; /b/bo-cai/", + "bibtex": "@inproceedings{wang-etal-2022-entity,\n title = \"Entity-centered Cross-document Relation Extraction\",\n author = \"Wang, Fengqi and\n Li, Fei and\n Fei, Hao and\n Li, Jingye and\n Wu, Shengqiong and\n Su, Fangfang and\n Shi, Wenxuan and\n Ji, Donghong and\n Cai, Bo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.671/\",\n doi = \"10.18653/v1/2022.emnlp-main.671\",\n pages = \"9871--9881\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.671.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.671/", + "pdf_size": 629146, + "gs_citation": 83, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10140624252103578492&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China+School of Computing, National University of Singapore, Singapore; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China; School of Computing, National University of Singapore, Singapore; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and 
Engineering, Wuhan University, China; School of Computing, National University of Singapore, Singapore; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China; Key Laboratory of Aerospace Information Security and Trusted Computing, Ministry of Education, School of Cyber Science and Engineering, Wuhan University, China", + "aff_domain": "whu.edu.cn;whu.edu.cn;nus.edu.sg;whu.edu.cn;u.nus.edu;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;nus.edu.sg;whu.edu.cn;u.nus.edu;whu.edu.cn;whu.edu.cn;whu.edu.cn;whu.edu.cn", + "github": "https://github.com/MakiseKuurisu/ecrim2020", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0;1;0;1;0;0;0;0", + "aff_unique_norm": "Wuhan University;National University of Singapore", + "aff_unique_dep": "School of Cyber Science and Engineering;School of Computing", + "aff_unique_url": "http://www.whu.edu.cn/;https://www.nus.edu.sg", + "aff_unique_abbr": "WHU;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;1;0;1;0;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.findings-emnlp.473", + "title": "Entity-level Interaction via Heterogeneous Graph for Multimodal Named Entity Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multimodal Named Entity Recognition (MNER) faces two specific challenges: 1) How to capture useful entity-related visual information. 2) How to alleviate the interference of visual noise. 
Previous works have gained progress by improving interacting mechanisms or seeking for better visual features. However, existing methods neglect the integrity of entity semantics and conduct cross-modal interaction at token-level, which cuts apart the semantics of entities and makes non-entity tokens easily interfered with by irrelevant visual noise. Thus in this paper, we propose an end-to-end heterogeneous Graph-based Entity-level Interacting model (GEI) for MNER. GEI first utilizes a span detection subtask to obtain entity representations, which serve as the bridge between two modalities. Then, the heterogeneous graph interacting network interacts entity with object nodes to capture entity-related visual information, and fuses it into only entity-associated tokens to rid non-entity tokens of the visual noise. Experiments on two widely used datasets demonstrate the effectiveness of our method. Our code will be available at https://github.com/GangZhao98/GEI.", + "author": "Gang Zhao; Guanting Dong; Yidong Shi; Haolong Yan; Weiran Xu; Si Li", + "authorids": "/g/gang-zhao/; /g/guanting-dong/; /y/yidong-shi/; /h/haolong-yan/; /w/weiran-xu/; /s/si-li/", + "bibtex": "@inproceedings{zhao-etal-2022-entity,\n title = \"Entity-level Interaction via Heterogeneous Graph for Multimodal Named Entity Recognition\",\n author = \"Zhao, Gang and\n Dong, Guanting and\n Shi, Yidong and\n Yan, Haolong and\n Xu, Weiran and\n Li, Si\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.473/\",\n doi = \"10.18653/v1/2022.findings-emnlp.473\",\n pages = \"6345--6350\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.473.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.473/", + "pdf_size": 974409, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16912409469305663931&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China", + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "https://github.com/GangZhao98/GEI", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "School of Artificial Intelligence", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.49", + "title": "Entity-level Sentiment Analysis in Contact Center Telephone Conversations", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Entity-level sentiment analysis predicts the sentiment about entities mentioned in a given text. It is very useful in a business context to understand user emotions towards certain entities, such as products or companies. 
In this paper, we demonstrate how we developed an entity-level sentiment analysis system that analyzes English telephone conversation transcripts in contact centers to provide business insight. We present two approaches, one entirely based on the transformer-based DistilBERT model, and another that uses a neural network supplemented with some heuristic rules.", + "author": "Xue-yong Fu; Cheng Chen; Md Tahmid Rahman Laskar; Shayna Gardiner; Pooja Hiranandani; Shashi Bhushan Tn", + "authorids": "/x/xue-yong-fu/; /c/cheng-chen/; /m/md-tahmid-rahman-laskar/; /s/shayna-gardiner/; /p/pooja-hiranandani/; /s/shashi-bhushan-tn/", + "bibtex": "@inproceedings{fu-etal-2022-entity,\n title = \"Entity-level Sentiment Analysis in Contact Center Telephone Conversations\",\n author = \"Fu, Xue-yong and\n Chen, Cheng and\n Laskar, Md Tahmid Rahman and\n Gardiner, Shayna and\n Hiranandani, Pooja and\n Tn, Shashi Bhushan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.49/\",\n doi = \"10.18653/v1/2022.emnlp-industry.49\",\n pages = \"484--491\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.49.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.49/", + "pdf_size": 381549, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18046773947558788135&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.499", + "title": "EntityCS: Improving Zero-Shot Cross-lingual Transfer with Entity-Centric Code Switching", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Accurate alignment between languages is fundamental for improving cross-lingual pre-trained language models (XLMs). Motivated by the natural phenomenon of code-switching (CS) in multilingual speakers, CS has been used as an effective data augmentation method that offers language alignment at word- or phrase-level, in contrast to sentence-level via parallel instances. Existing approaches either use dictionaries or parallel sentences with word-alignment to generate CS data by randomly switching words in a sentence. However, such methods can be suboptimal as dictionaries disregard semantics, and syntax might become invalid after random word switching. In this work, we propose EntityCS, a method that focuses on Entity-level Code-Switching to capture fine-grained cross-lingual semantics without corrupting syntax. We use Wikidata and the English Wikipedia to construct an entity-centric CS corpus by switching entities to their counterparts in other languages. We further propose entity-oriented masking strategies during intermediate model training on the EntityCS corpus for improving entity prediction. Evaluation of the trained models on four entity-centric downstream tasks shows consistent improvements over the baseline with a notable increase of 10% in Fact Retrieval. 
We release the corpus and models to assist research on code-switching and enriching XLMs with external knowledge.", + "author": "Chenxi Whitehouse; Fenia Christopoulou; Ignacio Iacobacci", + "authorids": "/c/chenxi-whitehouse/; /f/fenia-christopoulou/; /i/ignacio-iacobacci/", + "bibtex": "@inproceedings{whitehouse-etal-2022-entitycs,\n title = \"{E}ntity{CS}: Improving Zero-Shot Cross-lingual Transfer with Entity-Centric Code Switching\",\n author = \"Whitehouse, Chenxi and\n Christopoulou, Fenia and\n Iacobacci, Ignacio\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.499/\",\n doi = \"10.18653/v1/2022.findings-emnlp.499\",\n pages = \"6698--6714\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.499.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.499/", + "pdf_size": 614149, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13126370684035779830&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Huawei Noah\u2019s Ark Lab, London, UK + City, University of London; Huawei Noah\u2019s Ark Lab, London, UK; Huawei Noah\u2019s Ark Lab, London, UK", + "aff_domain": "city.ac.uk;huawei.com;huawei.com", + "email": "city.ac.uk;huawei.com;huawei.com", + "github": "https://github.com/huawei-noah/noah-research/tree/master/NLP/EntityCS", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0;0", + "aff_unique_norm": "Huawei Noah\u2019s Ark Lab;City, University of London", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.huawei.com/en/ai;https://www.city.ac.uk", + "aff_unique_abbr": "HNA Lab;City, University of London", + "aff_campus_unique_index": "0;0;0", + 
"aff_campus_unique": "London;", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.632", + "title": "Entropy- and Distance-Based Predictors From GPT-2 Attention Patterns Predict Reading Times Over and Above GPT-2 Surprisal", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based large language models are trained to make predictions about the next word by aggregating representations of previous tokens through their self-attention mechanism. In the field of cognitive modeling, such attention patterns have recently been interpreted as embodying the process of cue-based retrieval, in which attention over multiple targets is taken to generate interference and latency during retrieval. Under this framework, this work first defines an entropy-based predictor that quantifies the diffuseness of self-attention, as well as distance-based predictors that capture the incremental change in attention patterns across timesteps. Moreover, following recent studies that question the informativeness of attention weights, we also experiment with alternative methods for incorporating vector norms into attention weights. 
Regression experiments using predictors calculated from the GPT-2 language model show that these predictors deliver a substantially better fit to held-out self-paced reading and eye-tracking data over a rigorous baseline including GPT-2 surprisal.", + "author": "Byung-Doh Oh; William Schuler", + "authorids": "/b/byung-doh-oh/; /w/william-schuler/", + "bibtex": "@inproceedings{oh-schuler-2022-entropy,\n title = \"Entropy- and Distance-Based Predictors From {GPT}-2 Attention Patterns Predict Reading Times Over and Above {GPT}-2 Surprisal\",\n author = \"Oh, Byung-Doh and\n Schuler, William\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.632/\",\n doi = \"10.18653/v1/2022.emnlp-main.632\",\n pages = \"9324--9334\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.632.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.632/", + "pdf_size": 989969, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10431341937888356685&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 5, + "aff": "Department of Linguistics, The Ohio State University; Department of Linguistics, The Ohio State University", + "aff_domain": "osu.edu;osu.edu", + "email": "osu.edu;osu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Ohio State University", + "aff_unique_dep": "Department of Linguistics", + "aff_unique_url": "https://www.osu.edu", + "aff_unique_abbr": "OSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.720", + "title": 
"Entropy-Based Vocabulary Substitution for Incremental Learning in Multilingual Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In a practical real-world scenario, the longstanding goal is that a universal multilingual translation model can be incrementally updated when new language pairs arrive. Specifically, the initial vocabulary only covers some of the words in new languages, which hurts the translation quality for incremental learning. Although existing approaches attempt to address this issue by replacing the original vocabulary with a rebuilt vocabulary or constructing independent language-specific vocabularies, these methods can not meet the following three demands simultaneously: (1) High translation quality for original and incremental languages, (2) low cost for model training, (3) low time overhead for preprocessing. In this work, we propose an entropy-based vocabulary substitution (EVS) method that just needs to walk through new language pairs for incremental learning in a large-scale multilingual data updating while remaining the size of the vocabulary. Our method has access to learn new knowledge from updated training samples incrementally while keeping high translation quality for original language pairs, alleviating the issue of catastrophic forgetting. 
Results of experiments show that EVS can achieve better performance and save excess overhead for incremental learning in the multilingual machine translation task.", + "author": "Kaiyu Huang; Peng Li; Jin Ma; Yang Liu", + "authorids": "/k/kaiyu-huang/; /p/peng-li/; /j/jin-ma/; /y/yang-liu/", + "bibtex": "@inproceedings{huang-etal-2022-entropy,\n title = \"Entropy-Based Vocabulary Substitution for Incremental Learning in Multilingual Neural Machine Translation\",\n author = \"Huang, Kaiyu and\n Li, Peng and\n Ma, Jin and\n Liu, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.720/\",\n doi = \"10.18653/v1/2022.emnlp-main.720\",\n pages = \"10537--10550\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.720.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.720/", + "pdf_size": 1067748, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11343573182814821483&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Institute for AI Industry Research, Tsinghua University, Beijing, China+Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Beijing Academy of Artificial Intelligence, Beijing, China; Institute for AI Industry Research, Tsinghua University, Beijing, China+Dept. of Comp. Sci. 
& Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Beijing Academy of Artificial Intelligence, Beijing, China+Tencent+International Innovation Center of Tsinghua University, Shanghai, China+Quan Cheng Laboratory; Tencent+Sch. of Comp. Sci. & Tech., University of Science and Technology of China; Institute for AI Industry Research, Tsinghua University, Beijing, China+Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Beijing Academy of Artificial Intelligence, Beijing, China+Tencent+International Innovation Center of Tsinghua University, Shanghai, China+Quan Cheng Laboratory", + "aff_domain": "air.tsinghua.edu.cn;air.tsinghua.edu.cn;mail.ustc.edu.cn;tsinghua.edu.cn", + "email": "air.tsinghua.edu.cn;air.tsinghua.edu.cn;mail.ustc.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/koukaiu/evs", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+1+2;0+0+1+2+3+0+4;3+5;0+0+1+2+3+0+4", + "aff_unique_norm": "Tsinghua University;Beijing National Research Center for Information Science and Technology;Beijing Academy of Artificial Intelligence;Tencent Holdings Limited;Quan Cheng Laboratory;University of Science and Technology of China", + "aff_unique_dep": "Institute for AI Industry Research;;;;;School of Computer Science and Technology", + "aff_unique_url": "https://www.tsinghua.edu.cn;;https://www.baaic.cn;https://www.tencent.com;;http://www.ustc.edu.cn", + "aff_unique_abbr": "Tsinghua;;BAAI;Tencent;;USTC", + "aff_campus_unique_index": "0+0+0;0+0+0+2;;0+0+0+2", + "aff_campus_unique": "Beijing;;Shanghai", + "aff_country_unique_index": "0+0+0+0;0+0+0+0+0+0;0+0;0+0+0+0+0+0", + "aff_country_unique": "China;" + }, + { + "id": "2022.emnlp-main.18", + "title": "Estimating Soft Labels for Out-of-Domain Intent Detection", + "track": "main", + "status": "Main", + "award": false, + 
"abstract": "Out-of-Domain (OOD) intent detection is important for practical dialog systems. To alleviate the issue of lacking OOD training samples, some works propose synthesizing pseudo OOD samples and directly assigning one-hot OOD labels to these pseudo samples. However, these one-hot labels introduce noises to the training process because some \u201chard\u201d pseudo OOD samples may coincide with In-Domain (IND) intents. In this paper, we propose an adaptive soft pseudo labeling (ASoul) method that can estimate soft labels for pseudo OOD samples when training OOD detectors. Semantic connections between pseudo OOD samples and IND intents are captured using an embedding graph. A co-training framework is further introduced to produce resulting soft labels following the smoothness assumption, i.e., close samples are likely to have similar labels. Extensive experiments on three benchmark datasets show that ASoul consistently improves the OOD detection performance and outperforms various competitive baselines.", + "author": "Hao Lang; Yinhe Zheng; Jian Sun; Fei Huang; Luo Si; Yongbin Li", + "authorids": "/h/hao-lang/; /y/yinhe-zheng/; /j/jian-sun/; /f/fei-huang/; /l/luo-si/; /y/yongbin-li/", + "bibtex": "@inproceedings{lang-etal-2022-estimating,\n title = \"Estimating Soft Labels for Out-of-Domain Intent Detection\",\n author = \"Lang, Hao and\n Zheng, Yinhe and\n Sun, Jian and\n Huang, Fei and\n Si, Luo and\n Li, Yongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.18/\",\n doi = \"10.18653/v1/2022.emnlp-main.18\",\n pages = \"261--276\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.18.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.18/", + "pdf_size": 653431, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11353196877999276127&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "aff_domain": "alibaba-inc.com;163.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;163.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.299", + "title": "Ethics consideration sections in natural language processing papers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we present the results of a manual classification of all ethical consideration sections for ACL 2021. We also compare how many papers had an ethics consideration section per track and per world region in ACL 2021. We classified papers according to the ethical issues covered (research benefits, potential harms, and vulnerable groups affected) and whether the paper was marked as requiring ethics review by at least one reviewer. Moreover, we discuss recurring obstacles we have observed (highlighting some interesting texts we found along the way) and conclude with three suggestions. 
We think that this paper may be useful for anyone who needs to write \u2014 or review \u2014 an ethics section and would like to get an overview of what others have done.", + "author": "Luciana Benotti; Patrick Blackburn", + "authorids": "/l/luciana-benotti/; /p/patrick-blackburn/", + "bibtex": "@inproceedings{benotti-blackburn-2022-ethics,\n title = \"Ethics consideration sections in natural language processing papers\",\n author = \"Benotti, Luciana and\n Blackburn, Patrick\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.299/\",\n doi = \"10.18653/v1/2022.emnlp-main.299\",\n pages = \"4509--4516\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.299.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.299/", + "pdf_size": 183134, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14630620734606892360&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 3, + "aff": "Universidad Nacional de C\u00f3rdoba, Via Libre, CONICET, Argentina; Philosophy and Science Studies, IKH, Roskilde University, Denmark", + "aff_domain": "unc.edu.ar;ruc.dk", + "email": "unc.edu.ar;ruc.dk", + "github": "", + "project": "https://2021.aclweb.org/calls/papers/", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Universidad Nacional de C\u00f3rdoba;Roskilde University", + "aff_unique_dep": "Via Libre;Philosophy and Science Studies, IKH", + "aff_unique_url": "https://www.unc.edu.ar;https://www.ruc.dk", + "aff_unique_abbr": "UNC;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Argentina;Denmark" + }, + { + "id": 
"2022.findings-emnlp.403", + "title": "EtriCA: Event-Triggered Context-Aware Story Generation Augmented by Cross Attention", + "track": "main", + "status": "finding", + "award": false, + "abstract": "One of the key challenges of automatic story generation is how to generate a long narrative that can maintain fluency, relevance, and coherence. Despite recent progress, current story generation systems still face the challenge of how to effectively capture contextual and event features, which has a profound impact on a model\u2019s generation performance. To address these challenges, we present EtriCA, a novel neural generation model, which improves the relevance and coherence of the generated stories through residually mapping context features to event sequences with a cross-attention mechanism. Such a feature capturing mechanism allows our model to better exploit the logical relatedness between events when generating stories. Extensive experiments based on both automatic and human evaluations show that our model significantly outperforms state-of-the-art baselines, demonstrating the effectiveness of our model in leveraging context and event features.", + "author": "Chen Tang; Chenghua Lin; Henglin Huang; Frank Guerin; Zhihao Zhang", + "authorids": "/c/chen-tang/; /c/chenghua-lin/; /h/henglin-huang/; /f/frank-guerin/; /z/zhihao-zhang/", + "bibtex": "@inproceedings{tang-etal-2022-etrica,\n title = \"{E}tri{CA}: Event-Triggered Context-Aware Story Generation Augmented by Cross Attention\",\n author = \"Tang, Chen and\n Lin, Chenghua and\n Huang, Henglin and\n Guerin, Frank and\n Zhang, Zhihao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.403/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.403\",\n pages = \"5504--5518\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.403.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.403/", + "pdf_size": 1101630, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5079927141814630903&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Department of Computer Science, The University of Surrey, UK; Department of Computer Science, The University of Sheffield, UK; Department of Computer Science, The University of Surrey, UK; Department of Computer Science, The University of Surrey, UK; School of Economics and Management, Beihang University, Beijing, China", + "aff_domain": "surrey.ac.uk;sheffield.ac.uk;surrey.ac.uk;surrey.ac.uk;buaa.edu.cn", + "email": "surrey.ac.uk;sheffield.ac.uk;surrey.ac.uk;surrey.ac.uk;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;2", + "aff_unique_norm": "The University of Surrey;The University of Sheffield;Beihang University", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;School of Economics and Management", + "aff_unique_url": "https://www.surrey.ac.uk;https://www.sheffield.ac.uk;http://www.buaa.edu.cn", + "aff_unique_abbr": "Surrey;Sheffield;Beihang", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "2022.emnlp-main.129", + "title": "EvEntS ReaLM: Event Reasoning of Entity States via Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper investigates models of event implications. Specifically, how well models predict entity state-changes, by targeting their understanding of physical attributes. 
Nominally, Large Language models (LLM) have been exposed to procedural knowledge about how objects interact, yet our benchmarking shows they fail to reason about the world. Conversely, we also demonstrate that existing approaches often misrepresent the surprising abilities of LLMs via improper task encodings and that proper model prompting can dramatically improve performance of reported baseline results across multiple tasks. In particular, our results indicate that our prompting technique is especially useful for unseen attributes (out-of-domain) or when only limited data is available.", + "author": "Evangelia Spiliopoulou; Artidoro Pagnoni; Yonatan Bisk; Eduard Hovy", + "authorids": "/e/evangelia-spiliopoulou/; /a/artidoro-pagnoni/; /y/yonatan-bisk/; /e/eduard-hovy/", + "bibtex": "@inproceedings{spiliopoulou-etal-2022-events,\n title = \"{E}v{E}nt{S} {R}ea{LM}: Event Reasoning of Entity States via Language Models\",\n author = \"Spiliopoulou, Evangelia and\n Pagnoni, Artidoro and\n Bisk, Yonatan and\n Hovy, Eduard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.129/\",\n doi = \"10.18653/v1/2022.emnlp-main.129\",\n pages = \"1982--1997\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.129.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.129/", + "pdf_size": 582236, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16880283057876741452&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Amazon + AWS, AI Labs; Univ. 
of Washington; Carnegie Mellon University; Carnegie Mellon University", + "aff_domain": "amazon.com;cs.washington.edu;cs.cmu.edu;cs.cmu.edu", + "email": "amazon.com;cs.washington.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/spilioeve/eventsrealm", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;3;3", + "aff_unique_norm": "Amazon.com, Inc.;Amazon Web Services;University of Washington;Carnegie Mellon University", + "aff_unique_dep": ";AI Labs;;", + "aff_unique_url": "https://www.amazon.com;https://aws.amazon.com;https://www.washington.edu;https://www.cmu.edu", + "aff_unique_abbr": "Amazon;AWS;UW;CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.745", + "title": "Evade the Trap of Mediocrity: Promoting Diversity and Novelty in Text Generation via Concentrating Attention", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, powerful Transformer architectures have proven superior in generating high-quality sentences. Nevertheless, these models tend to produce dull high-frequency phrases, severely hurting the diversity and novelty of generated text. In this work, we dig into the intrinsic mechanism of this problem and found that sparser attention values in Transformer could improve diversity. To understand such a phenomenon, we first conduct both empirical and theoretical analysis and then attribute it to representation degeneration caused by the attentive mixture of the hidden states during training. We term this process the Trap of Mediocrity. To escape from such a trap, we introduce a novel attention regularization loss to control the sharpness of the attention distribution, which is transparent to model structures and can be easily implemented within 20 lines of python code. 
We prove that this method could be mathematically regarded as learning a Bayesian approximation of posterior attention. Experiments show that our method improved the diversity and novelty of the generated text while maintaining comparable quality on a variety of conditional and unconditional generation tasks.", + "author": "Wenhao Li; Xiaoyuan Yi; Jinyi Hu; Maosong Sun; Xing Xie", + "authorids": "/w/wenhao-li/; /x/xiaoyuan-yi/; /j/jinyi-hu/; /m/maosong-sun/; /x/xing-xie/", + "bibtex": "@inproceedings{li-etal-2022-evade,\n title = \"Evade the Trap of Mediocrity: Promoting Diversity and Novelty in Text Generation via Concentrating Attention\",\n author = \"Li, Wenhao and\n Yi, Xiaoyuan and\n Hu, Jinyi and\n Sun, Maosong and\n Xie, Xing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.745/\",\n doi = \"10.18653/v1/2022.emnlp-main.745\",\n pages = \"10834--10858\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.745.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.745/", + "pdf_size": 1115195, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3067766241234051308&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.319", + "title": "Evaluating Parameter Efficient Learning for Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Parameter efficient learning methods (PERMs) have recently gained significant attention as they provide an efficient way for pre-trained language models (PLMs) to adapt to a downstream task. 
However, these conclusions are mostly drawn from in-domain evaluations over the full training set. In this paper, we present comparisons between PERMs and finetuning from three new perspectives: (1) the effect of sample and model size to in-domain evaluations, (2) generalization to unseen domains and new datasets, and (3) the faithfulness of generations. Our results show that for in-domain settings (a) there is a cross point of sample size for which PERMs will perform better than finetuning when training with fewer samples, and (b) larger PLMs have larger cross points. For cross-domain and cross-dataset cases, we show that (a) Adapter (Houlsby et al., 2019) performs the best amongst all the PERMs studied here, and (b) it outperforms finetuning if the task dataset is below a certain size. We also compare the faithfulness of generations and show that PERMs can achieve better faithfulness score than finetuning, especially for small training set, by as much as 6%. Finally, we apply Adapter to MT-NLG 530b (Smith et al., 2022) and achieve new state-of-the-art results on Xsum (Narayan et al., 2018) for all ROUGE scores (ROUGE-1 49.17, ROUGE-2 27.20, ROUGE-L 40.98).", + "author": "Peng Xu; Mostofa Patwary; Shrimai Prabhumoye; Virginia Adams; Ryan Prenger; Wei Ping; Nayeon Lee; Mohammad Shoeybi; Bryan Catanzaro", + "authorids": "/p/peng-xu/; /m/mostofa-patwary/; /s/shrimai-prabhumoye/; /v/virginia-adams/; /r/ryan-prenger/; /w/wei-ping/; /n/nayeon-lee/; /m/mohammad-shoeybi/; /b/bryan-catanzaro/", + "bibtex": "@inproceedings{xu-etal-2022-evaluating,\n title = \"Evaluating Parameter Efficient Learning for Generation\",\n author = \"Xu, Peng and\n Patwary, Mostofa and\n Prabhumoye, Shrimai and\n Adams, Virginia and\n Prenger, Ryan and\n Ping, Wei and\n Lee, Nayeon and\n Shoeybi, Mohammad and\n Catanzaro, Bryan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = 
dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.319/\",\n doi = \"10.18653/v1/2022.emnlp-main.319\",\n pages = \"4824--4833\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.319.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.319/", + "pdf_size": 274206, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11618153486613505089&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "NVIDIA; NVIDIA; NVIDIA; NVIDIA; NVIDIA; NVIDIA; The Hong Kong University of Science and Technology; NVIDIA; NVIDIA", + "aff_domain": "nvidia.com; ; ; ; ; ; ; ; ", + "email": "nvidia.com; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;1;0;0", + "aff_unique_norm": "NVIDIA Corporation;Hong Kong University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nvidia.com;https://www.ust.hk", + "aff_unique_abbr": "NVIDIA;HKUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.78", + "title": "Evaluating Token-Level and Passage-Level Dense Retrieval Models for Math Information Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "With the recent success of dense retrieval methods based on bi-encoders, studies have applied this approach to various interesting downstream retrieval tasks with good efficiency and in-domain effectiveness.Recently, we have also seen the presence of dense retrieval models in Math Information Retrieval (MIR) tasks,but the most effective systems remain classic retrieval methods that consider hand-crafted structure features.In this work, we try to combine the best of both worlds: a well-defined structure search 
method for effective formula search and efficient bi-encoder dense retrieval models to capture contextual similarities. Specifically, we have evaluated two representative bi-encoder models for token-level and passage-level dense retrieval on recent MIR tasks. Our results show that bi-encoder models are highly complementary to existing structure search methods, and we are able to advance the state-of-the-art on MIR datasets.", + "author": "Wei Zhong; Jheng-Hong Yang; Yuqing Xie; Jimmy Lin", + "authorids": "/w/wei-zhong/; /j/jheng-hong-yang/; /y/yuqing-xie/; /j/jimmy-lin/", + "bibtex": "@inproceedings{zhong-etal-2022-evaluating,\n title = \"Evaluating Token-Level and Passage-Level Dense Retrieval Models for Math Information Retrieval\",\n author = \"Zhong, Wei and\n Yang, Jheng-Hong and\n Xie, Yuqing and\n Lin, Jimmy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.78/\",\n doi = \"10.18653/v1/2022.findings-emnlp.78\",\n pages = \"1092--1102\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.78.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.78/", + "pdf_size": 301935, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14288106764870118513&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; David R. Cheriton School of Computer Science, University of Waterloo; David R. 
Cheriton School of Computer Science, University of Waterloo", + "aff_domain": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;uwaterloo.ca;uwaterloo.ca;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Waterloo", + "aff_unique_dep": "David R. Cheriton School of Computer Science", + "aff_unique_url": "https://uwaterloo.ca", + "aff_unique_abbr": "UWaterloo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.emnlp-main.654", + "title": "Evaluating and Improving Factuality in Multimodal Abstractive Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current metrics for evaluating factuality for abstractive document summarization have achieved high correlations with human judgment, but they do not account for the vision modality and thus are not adequate for vision-and-language summarization. We propose CLIPBERTSCORE, a simple weighted combination of CLIPScore and BERTScore to leverage the robustness and strong factuality detection performance between image-summary and document-summary, respectively. Next, due to the lack of meta-evaluation benchmarks to evaluate the quality of multimodal factuality metrics, we collect human judgments of factuality with respect to documents and images. We show that this simple combination of two metrics in the zero-shot setting achieves higher correlations than existing factuality metrics for document summarization, outperforms an existing multimodal summarization metric, and performs competitively with strong multimodal factuality metrics specifically fine-tuned for the task. Our thorough analysis demonstrates the robustness and high correlation of CLIPBERTSCORE and its components on four factuality metric-evaluation benchmarks. 
Finally, we demonstrate two practical downstream applications of our CLIPBERTSCORE metric: for selecting important images to focus on during training, and as a reward for reinforcement learning to improve factuality of multimodal summary generation w.r.t automatic and human evaluation.", + "author": "David Wan; Mohit Bansal", + "authorids": "/d/david-wan/; /m/mohit-bansal/", + "bibtex": "@inproceedings{wan-bansal-2022-evaluating,\n title = \"Evaluating and Improving Factuality in Multimodal Abstractive Summarization\",\n author = \"Wan, David and\n Bansal, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.654/\",\n doi = \"10.18653/v1/2022.emnlp-main.654\",\n pages = \"9632--9648\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.654.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.654/", + "pdf_size": 1088263, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7040099162362393372&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of North Carolina at Chapel Hill; University of North Carolina at Chapel Hill", + "aff_domain": "cs.unc.edu;cs.unc.edu", + "email": "cs.unc.edu;cs.unc.edu", + "github": "https://github.com/meetdavidwan/faithful-multimodal-summarization", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of North Carolina", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unc.edu", + "aff_unique_abbr": "UNC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chapel Hill", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"2022.findings-emnlp.125", + "title": "Evaluating the Faithfulness of Importance Measures in NLP by Recursively Masking Allegedly Important Tokens and Retraining", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To explain NLP models a popular approach is to use importance measures, such as attention, which inform input tokens are important for making a prediction. However, an open question is how well these explanations accurately reflect a model\u2019s logic, a property called faithfulness. To answer this question, we propose Recursive ROAR, a new faithfulness metric. This works by recursively masking allegedly important tokens and then retraining the model. The principle is that this should result in worse model performance compared to masking random tokens. The result is a performance curve given a masking-ratio. Furthermore, we propose a summarizing metric using area-between-curves (ABC), which allows for easy comparison across papers, models, and tasks. We evaluate 4 different importance measures on 8 different datasets, using both LSTM-attention models and RoBERTa models. We find that the faithfulness of importance measures is both model-dependent and task-dependent. 
This conclusion contradicts previous evaluations in both computer vision and faithfulness of attention literature.", + "author": "Andreas Madsen; Nicholas Meade; Vaibhav Adlakha; Siva Reddy", + "authorids": "/a/andreas-madsen/; /n/nicholas-meade/; /v/vaibhav-adlakha/; /s/siva-reddy/", + "bibtex": "@inproceedings{madsen-etal-2022-evaluating,\n title = \"Evaluating the Faithfulness of Importance Measures in {NLP} by Recursively Masking Allegedly Important Tokens and Retraining\",\n author = \"Madsen, Andreas and\n Meade, Nicholas and\n Adlakha, Vaibhav and\n Reddy, Siva\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.125/\",\n doi = \"10.18653/v1/2022.findings-emnlp.125\",\n pages = \"1731--1751\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.125.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.125/", + "pdf_size": 697209, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16501988393593199471&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "Mila \u2013 Quebec AI Institute+Polytechnique Montr\u00e9al; Mila \u2013 Quebec AI Institute+McGill University; Mila \u2013 Quebec AI Institute+McGill University; Mila \u2013 Quebec AI Institute+McGill University+Facebook CIFAR AI Chair", + "aff_domain": "mila.quebec;mila.quebec;mila.quebec;mila.quebec", + "email": "mila.quebec;mila.quebec;mila.quebec;mila.quebec", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+2;0+2;0+2+3", + "aff_unique_norm": "Quebec AI Institute;Polytechnique Montr\u00e9al;McGill University;Facebook", + "aff_unique_dep": "Mila;;;Facebook CIFAR AI", + "aff_unique_url": 
"https://mila.quebec;https://www.polymtl.ca;https://www.mcgill.ca;https://www.facebook.com", + "aff_unique_abbr": "Mila;PolyMTL;McGill;FB", + "aff_campus_unique_index": "1;;;", + "aff_campus_unique": ";Montr\u00e9al", + "aff_country_unique_index": "0+0;0+0;0+0;0+0+1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-main.624", + "title": "Evaluating the Impact of Model Scale for Compositional Generalization in Semantic Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite their strong performance on many tasks, pre-trained language models have been shown to struggle on out-of-distribution compositional generalization. Meanwhile, recent work has shown considerable improvements on many NLP tasks from model scaling. Can scaling up model size also improve compositional generalization in semantic parsing? We evaluate encoder-decoder models up to 11B parameters and decoder-only models up to 540B parameters, and compare model scaling curves for three different methods for applying a pre-trained language model to a new task: fine-tuning all parameters, prompt tuning, and in-context learning. We observe that fine-tuning generally has flat or negative scaling curves on out-of-distribution compositional generalization in semantic parsing evaluations. In-context learning has positive scaling curves, but is generally outperformed by much smaller fine-tuned models. Prompt-tuning can outperform fine-tuning, suggesting further potential improvements from scaling as it exhibits a more positive scaling curve. Additionally, we identify several error trends that vary with model scale. For example, larger models are generally better at modeling the syntax of the output space, but are also more prone to certain types of overfitting. 
Overall, our study highlights limitations of current techniques for effectively leveraging model scale for compositional generalization, while our analysis also suggests promising directions for future work.", + "author": "Linlu Qiu; Peter Shaw; Panupong Pasupat; Tianze Shi; Jonathan Herzig; Emily Pitler; Fei Sha; Kristina Toutanova", + "authorids": "/l/linlu-qiu/; /p/peter-shaw/; /p/panupong-pasupat/; /t/tianze-shi/; /j/jonathan-herzig/; /e/emily-pitler/; /f/fei-sha/; /k/kristina-toutanova/", + "bibtex": "@inproceedings{qiu-etal-2022-evaluating,\n title = \"Evaluating the Impact of Model Scale for Compositional Generalization in Semantic Parsing\",\n author = \"Qiu, Linlu and\n Shaw, Peter and\n Pasupat, Panupong and\n Shi, Tianze and\n Herzig, Jonathan and\n Pitler, Emily and\n Sha, Fei and\n Toutanova, Kristina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.624/\",\n doi = \"10.18653/v1/2022.emnlp-main.624\",\n pages = \"9157--9179\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.624.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.624/", + "pdf_size": 8903671, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15571786297330437402&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Massachusetts Institute of Technology; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "mit.edu;google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "email": "mit.edu;google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + 
"author_num": 8, + "aff_unique_index": "0;1;1;1;1;1;1;1", + "aff_unique_norm": "Massachusetts Institute of Technology;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://web.mit.edu;https://research.google", + "aff_unique_abbr": "MIT;Google Research", + "aff_campus_unique_index": "1;1;1;1;1;1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.718", + "title": "Evaluating the Knowledge Dependency of Questions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The automatic generation of Multiple Choice Questions (MCQ) has the potential to reduce the time educators spend on student assessment significantly. However, existing evaluation metrics for MCQ generation, such as BLEU, ROUGE, and METEOR, focus on the n-gram based similarity of the generated MCQ to the gold sample in the dataset and disregard their educational value. They fail to evaluate the MCQ\u2019s ability to assess the student\u2019s knowledge of the corresponding target fact. To tackle this issue, we propose a novel automatic evaluation metric, coined Knowledge Dependent Answerability (KDA), which measures the MCQ\u2019s answerability given knowledge of the target fact. Specifically, we first show how to measure KDA based on student responses from a human survey. Then, we propose two automatic evaluation metrics, KDA_disc and KDA_cont, that approximate KDA by leveraging pre-trained language models to imitate students\u2019 problem-solving behavior. Through our human studies, we show that KDA_disc and KDA_soft have strong correlations with both (1) KDA and (2) usability in an actual classroom setting, labeled by experts. 
Furthermore, when combined with n-gram based similarity metrics, KDA_disc and KDA_cont are shown to have a strong predictive power for various expert-labeled MCQ quality measures.", + "author": "Hyeongdon Moon; Yoonseok Yang; Hangyeol Yu; Seunghyun Lee; Myeongho Jeong; Juneyoung Park; Jamin Shin; Minsam Kim; Seungtaek Choi", + "authorids": "/h/hyeongdon-moon/; /y/yoonseok-yang/; /h/hangyeol-yu/; /s/seunghyun-lee/; /m/myeongho-jeong/; /j/juneyoung-park/; /j/jamin-shin/; /m/minsam-kim/; /s/seungtaek-choi/", + "bibtex": "@inproceedings{moon-etal-2022-evaluating,\n title = \"Evaluating the Knowledge Dependency of Questions\",\n author = \"Moon, Hyeongdon and\n Yang, Yoonseok and\n Yu, Hangyeol and\n Lee, Seunghyun and\n Jeong, Myeongho and\n Park, Juneyoung and\n Shin, Jamin and\n Kim, Minsam and\n Choi, Seungtaek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.718/\",\n doi = \"10.18653/v1/2022.emnlp-main.718\",\n pages = \"10512--10526\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.718.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.718/", + "pdf_size": 1248200, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11270020519389278285&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Riiid AI Research\u2020; UC Berkeley\u2020; Riiid AI Research\u2020+NAVER AI Lab; Riiid AI Research; Riiid AI Research; Riiid AI Research; Riiid AI Research; Riiid AI Research\u2020; Riiid AI Research\u2020", + "aff_domain": "riiid.co;berkeley.edu;gmail.com; ; ; ; ;riiid.co;riiid.co", + "email": "riiid.co;berkeley.edu;gmail.com; ; ; ; ;riiid.co;riiid.co", + "github": 
"https://github.com/riiid/question-score", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0+2;0;0;0;0;0;0", + "aff_unique_norm": "Riiid;University of California, Berkeley;NAVER Corporation", + "aff_unique_dep": "AI Research;;NAVER AI Lab", + "aff_unique_url": "https://www.riiid.com;https://www.berkeley.edu;https://www.naver.com", + "aff_unique_abbr": "Riiid;UC Berkeley;NAVER", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Berkeley", + "aff_country_unique_index": "0;1;0+0;0;0;0;0;0;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "2022.findings-emnlp.176", + "title": "Event-Centric Question Answering via Contrastive Learning and Invertible Event Transformation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Human reading comprehension often requires reasoning of event semantic relations in narratives, represented by Event-centric Question-Answering (QA). To address event-centric QA, we propose a novel QA model with contrastive learning and invertible event transformation, call TranCLR. Our proposed model utilizes an invertible transformation matrix to project semantic vectors of events into a common event embedding space, trained with contrastive learning, and thus naturally inject event semantic knowledge into mainstream QA pipelines. The transformation matrix is fine-tuned with the annotated event relation types between events that occurred in questions and those in answers, using event-aware question vectors. Experimental results on the Event Semantic Relation Reasoning (ESTER) dataset show significant improvements in both generative and extractive settings compared to the existing strong baselines, achieving over 8.4% gain in the token-level F1 score and 3.0% gain in Exact Match (EM) score under the multi-answer setting. 
Qualitative analysis reveals the high quality of the generated answers by TranCLR, demonstrating the feasibility of injecting event knowledge into QA model learning. Our code and models can be found at https://github.com/LuJunru/TranCLR.", + "author": "Junru Lu; Xingwei Tan; Gabriele Pergola; Lin Gui; Yulan He", + "authorids": "/j/junru-lu/; /x/xingwei-tan/; /g/gabriele-pergola/; /l/lin-gui/; /y/yulan-he/", + "bibtex": "@inproceedings{lu-etal-2022-event,\n title = \"Event-Centric Question Answering via Contrastive Learning and Invertible Event Transformation\",\n author = \"Lu, Junru and\n Tan, Xingwei and\n Pergola, Gabriele and\n Gui, Lin and\n He, Yulan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.176/\",\n doi = \"10.18653/v1/2022.findings-emnlp.176\",\n pages = \"2377--2389\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.176.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.176/", + "pdf_size": 2333048, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9190041506413813833&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 7, + "aff": "Department of Computer Science, University of Warwick, UK; Department of Computer Science, University of Warwick, UK; Department of Computer Science, University of Warwick, UK; Department of Informatics, King\u2019s College London, UK + The Alan Turing Institute, UK; Department of Computer Science, University of Warwick, UK + Department of Informatics, King\u2019s College London, UK + The Alan Turing Institute, UK", + "aff_domain": "warwick.ac.uk;warwick.ac.uk;warwick.ac.uk;kcl.ac.uk;kcl.ac.uk", + "email": 
"warwick.ac.uk;warwick.ac.uk;warwick.ac.uk;kcl.ac.uk;kcl.ac.uk", + "github": "https://github.com/LuJunru/TranCLR", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+2;0+1+2", + "aff_unique_norm": "University of Warwick;King\u2019s College London;The Alan Turing Institute", + "aff_unique_dep": "Department of Computer Science;Department of Informatics;", + "aff_unique_url": "https://warwick.ac.uk;https://www.kcl.ac.uk;https://www.turing.ac.uk", + "aff_unique_abbr": "Warwick;KCL;ATI", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0+0+0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.283", + "title": "Evidence > Intuition: Transferability Estimation for Encoder Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With the increase in availability of large pre-trained language models (LMs) in Natural Language Processing (NLP), it becomes critical to assess their fit for a specific target task a priori\u2014as fine-tuning the entire space of available LMs is computationally prohibitive and unsustainable. However, encoder transferability estimation has received little to no attention in NLP. In this paper, we propose to generate quantitative evidence to predict which LM, out of a pool of models, will perform best on a target task without having to fine-tune all candidates. We provide a comprehensive study on LM ranking for 10 NLP tasks spanning the two fundamental problem types of classification and structured prediction. 
We adopt the state-of-the-art Logarithm of Maximum Evidence (LogME) measure from Computer Vision (CV) and find that it positively correlates with final LM performance in 94% of the setups.In the first study of its kind, we further compare transferability measures with the de facto standard of human practitioner ranking, finding that evidence from quantitative metrics is more robust than pure intuition and can help identify unexpected LM candidates.", + "author": "Elisa Bassignana; Max M\u00fcller-Eberstein; Mike Zhang; Barbara Plank", + "authorids": "/e/elisa-bassignana/; /m/max-muller-eberstein/; /m/mike-zhang/; /b/barbara-plank/", + "bibtex": "@inproceedings{bassignana-etal-2022-evidence,\n title = \"Evidence {\\ensuremath{>}} Intuition: Transferability Estimation for Encoder Selection\",\n author = {Bassignana, Elisa and\n M{\\\"u}ller-Eberstein, Max and\n Zhang, Mike and\n Plank, Barbara},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.283/\",\n doi = \"10.18653/v1/2022.emnlp-main.283\",\n pages = \"4218--4227\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.283.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.283/", + "pdf_size": 400106, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15851462441420121012&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff": "\u263cDepartment of Computer Science, IT University of Copenhagen, Denmark; \u263cDepartment of Computer Science, IT University of Copenhagen, Denmark; \u263cDepartment of Computer Science, IT University of Copenhagen, Denmark; \u26f0Center for Information and Language Processing (CIS), LMU Munich, Germany + 
"\u2642robotMunich Center for Machine Learning (MCML), Munich, Germany", + "aff_domain": "itu.dk;itu.dk;itu.dk;lmu.de", + "email": "itu.dk;itu.dk;itu.dk;lmu.de", + "github": "https://github.com/mainlp/logme-nlp", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1+2", + "aff_unique_norm": "IT University of Copenhagen;LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Department of Computer Science;Center for Information and Language Processing (CIS);", + "aff_unique_url": "https://itu.dk;https://www.lmu.de;", + "aff_unique_abbr": "ITU Copenhagen;LMU;MCML", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0;0;0;1+1", + "aff_country_unique": "Denmark;Germany" + }, + { + "id": "2022.emnlp-main.304", + "title": "ExPUNations: Augmenting Puns with Keywords and Explanations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The tasks of humor understanding and generation are challenging and subjective even for humans, requiring commonsense and real-world knowledge to master. Puns, in particular, add the challenge of fusing that knowledge with the ability to interpret lexical-semantic ambiguity. In this paper, we present the ExPUNations (ExPUN) dataset, in which we augment an existing dataset of puns with detailed crowdsourced annotations of keywords denoting the most distinctive words that make the text funny, pun explanations describing why the text is funny, and fine-grained funniness ratings. This is the first humor dataset with such extensive and fine-grained annotations specifically for puns. Based on these annotations, we propose two tasks: explanation generation to aid with pun classification and keyword-conditioned pun generation, to challenge the current state-of-the-art natural language understanding and generation models\u2019 ability to understand and generate humor. 
We showcase that the annotated keywords we collect are helpful for generating better novel humorous texts in human evaluation, and that our natural language explanations can be leveraged to improve both the accuracy and robustness of humor classifiers.", + "author": "Jiao Sun; Anjali Narayan-Chen; Shereen Oraby; Alessandra Cervone; Tagyoung Chung; Jing Huang; Yang Liu; Nanyun Peng", + "authorids": "/j/jiao-sun/; /a/anjali-narayan-chen/; /s/shereen-oraby/; /a/alessandra-cervone/; /t/tagyoung-chung/; /j/jing-huang/; /y/yang-liu/; /n/nanyun-peng/", + "bibtex": "@inproceedings{sun-etal-2022-expunations,\n title = \"{E}x{PUN}ations: Augmenting Puns with Keywords and Explanations\",\n author = \"Sun, Jiao and\n Narayan-Chen, Anjali and\n Oraby, Shereen and\n Cervone, Alessandra and\n Chung, Tagyoung and\n Huang, Jing and\n Liu, Yang and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.304/\",\n doi = \"10.18653/v1/2022.emnlp-main.304\",\n pages = \"4590--4605\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.304.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.304/", + "pdf_size": 4401622, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1879766762437476414&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; University of Southern California + University of California, Los Angeles", + "aff_domain": "usc.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;cs.ucla.edu", + "email": 
"usc.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;1;1;1;1;0+2", + "aff_unique_norm": "University of Southern California;Amazon;University of California, Los Angeles", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.usc.edu;https://www.amazon.com;https://www.ucla.edu", + "aff_unique_abbr": "USC;Amazon;UCLA", + "aff_campus_unique_index": "0;0+0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.196", + "title": "Experimental Standards for Deep Learning in Natural Language Processing Research", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The field of Deep Learning (DL) has undergone explosive growth during the last decade, with a substantial impact on Natural Language Processing (NLP) as well. Yet, compared to more established disciplines, a lack of common experimental standards remains an open challenge to the field at large. Starting from fundamental scientific principles, we distill ongoing discussions on experimental standards in NLP into a single, widely-applicable methodology. Following these best practices is crucial to strengthen experimental evidence, improve reproducibility and enable scientific progress. 
These standards are further collected in a public repository to help them transparently adapt to future needs.", + "author": "Dennis Ulmer; Elisa Bassignana; Max M\u00fcller-Eberstein; Daniel Varab; Mike Zhang; Rob van der Goot; Christian Hardmeier; Barbara Plank", + "authorids": "/d/dennis-ulmer/; /e/elisa-bassignana/; /m/max-muller-eberstein/; /d/daniel-varab/; /m/mike-zhang/; /r/rob-van-der-goot/; /c/christian-hardmeier/; /b/barbara-plank/", + "bibtex": "@inproceedings{ulmer-etal-2022-experimental,\n title = \"Experimental Standards for Deep Learning in Natural Language Processing Research\",\n author = {Ulmer, Dennis and\n Bassignana, Elisa and\n M{\\\"u}ller-Eberstein, Max and\n Varab, Daniel and\n Zhang, Mike and\n van der Goot, Rob and\n Hardmeier, Christian and\n Plank, Barbara},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.196/\",\n doi = \"10.18653/v1/2022.findings-emnlp.196\",\n pages = \"2673--2692\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.196.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.196/", + "pdf_size": 352980, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5200622326915182845&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.74", + "title": "ExpertPLM: Pre-training Expert Representation for Expert Finding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Expert Finding is an important task in Community Question Answering (CQA) platforms, which could help route questions to 
potential users to answer. The key is to learn representations of experts based on their historical answered questions accurately. In this paper, inspired by the strong text understanding ability of Pretrained Language modelings (PLMs), we propose a pre-training and fine-tuning expert finding framework. The core is that we design an expert-level pre-training paradigm, that effectively integrates expert interest and expertise simultaneously. Specifically different from the typical corpus-level pre-training, we treat each expert as the basic pre-training unit including all the historical answered question titles of the expert, which could fully indicate the expert interests for questions. Besides, we integrate the vote score information along with each answer of the expert into the pre-training phrase to model the expert ability explicitly. Finally, we propose a novel reputation-augmented Masked Language Model (MLM) pre-training strategy to capture the expert reputation information. In this way, our method could learn expert representation comprehensively, which then will be adopted and fine-tuned in the down-streaming expert-finding task. 
Extensive experimental results on six real-world CQA datasets demonstrate the effectiveness of our method.", + "author": "Qiyao Peng; Hongtao Liu", + "authorids": "/q/qiyao-peng/; /h/hongtao-liu/", + "bibtex": "@inproceedings{peng-liu-2022-expertplm,\n title = \"{E}xpert{PLM}: Pre-training Expert Representation for Expert Finding\",\n author = \"Peng, Qiyao and\n Liu, Hongtao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.74/\",\n doi = \"10.18653/v1/2022.findings-emnlp.74\",\n pages = \"1043--1052\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.74.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.74/", + "pdf_size": 406711, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16867535840192764432&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "https://stackexchange.com", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.356", + "title": "Explainable Question Answering based on Semantic Graph by Global Differentiable Learning and Dynamic Adaptive Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-hop Question Answering is an agent task for testing the reasoning ability. With the development of pre-trained models, the implicit reasoning ability has been surprisingly improved and can even surpass human performance. However, the nature of the black box hinders the construction of explainable intelligent systems. Several researchers have explored explainable neural-symbolic reasoning methods based on question decomposition techniques. 
The undifferentiable symbolic operations and the error propagation in the reasoning process lead to poor performance. To alleviate it, we propose a simple yet effective Global Differentiable Learning strategy to explore optimal reasoning paths from the latent probability space so that the model learns to solve intermediate reasoning processes without expert annotations. We further design a Dynamic Adaptive Reasoner to enhance the generalization of unseen questions. Our method achieves 17% improvements in F1-score against BreakRC and shows better interpretability. We take a step forward in building interpretable reasoning methods.", + "author": "Jianguo Mao; Wenbin Jiang; Xiangdong Wang; Hong Liu; Yu Xia; Yajuan Lyu; QiaoQiao She", + "authorids": "/j/jianguo-mao/; /w/wenbin-jiang/; /x/xiangdong-wang/; /h/hong-liu/; /y/yu-xia/; /y/yajuan-lyu/; /q/qiaoqiao-she/", + "bibtex": "@inproceedings{mao-etal-2022-explainable,\n title = \"Explainable Question Answering based on Semantic Graph by Global Differentiable Learning and Dynamic Adaptive Reasoning\",\n author = \"Mao, Jianguo and\n Jiang, Wenbin and\n Wang, Xiangdong and\n Liu, Hong and\n Xia, Yu and\n Lyu, Yajuan and\n She, QiaoQiao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.356/\",\n doi = \"10.18653/v1/2022.emnlp-main.356\",\n pages = \"5318--5325\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.356.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.356/", + "pdf_size": 794997, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1857253379463267192&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 0, + "aff": "Beijing Key Laboratory of 
Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China+University of Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China; Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China", + "aff_domain": "ict.ac.cn;baidu.com;ict.ac.cn;ict.ac.cn;baidu.com;baidu.com;baidu.com", + "email": "ict.ac.cn;baidu.com;ict.ac.cn;ict.ac.cn;baidu.com;baidu.com;baidu.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;0;0;2;2;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Baidu Inc.", + "aff_unique_dep": "Institute of Computing Technology;;", + "aff_unique_url": "http://www.ict.cas.cn;http://www.ucas.ac.cn;https://www.baidu.com", + "aff_unique_abbr": "CAS;UCAS;Baidu", + "aff_campus_unique_index": "0+0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.245", + "title": "Explainable Slot Type Attentions to Improve Joint Intent Detection and Slot Filling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Joint intent detection and slot filling is a key research topic in natural language understanding (NLU). Existing joint intent and slot filling systems analyze and compute features collectively for all slot types, and importantly, have no way to explain the slot filling model decisions. 
In this work, we propose a novel approach that: (i) learns to generate additional slot type specific features in order to improve accuracy and (ii) provides explanations for slot filling decisions for the first time in a joint NLU model. We perform an additional constrained supervision using a set of binary classifiers for the slot type specific feature learning, thus ensuring appropriate attention weights are learned in the process to explain slot filling decisions for utterances. Our model is inherently explainable and does not need any post-hoc processing. We evaluate our approach on two widely used datasets and show accuracy improvements. Moreover, a detailed analysis is also provided for the exclusive slot explainability.", + "author": "Kalpa Gunaratna; Vijay Srinivasan; Akhila Yerukola; Hongxia Jin", + "authorids": "/k/kalpa-gunaratna/; /v/vijay-srinivasan/; /a/akhila-yerukola/; /h/hongxia-jin/", + "bibtex": "@inproceedings{gunaratna-etal-2022-explainable,\n title = \"Explainable Slot Type Attentions to Improve Joint Intent Detection and Slot Filling\",\n author = \"Gunaratna, Kalpa and\n Srinivasan, Vijay and\n Yerukola, Akhila and\n Jin, Hongxia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.245/\",\n doi = \"10.18653/v1/2022.findings-emnlp.245\",\n pages = \"3367--3378\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.245.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.245/", + "pdf_size": 1464148, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17498404278099838144&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Samsung Research America, Mountain View CA, USA; 
Samsung Research America, Mountain View CA, USA; Carnegie Mellon University, Pittsburgh PA, USA + Samsung Research America, Mountain View CA, USA; Samsung Research America, Mountain View CA, USA", + "aff_domain": "samsung.com;samsung.com;andrew.cmu.edu;samsung.com", + "email": "samsung.com;samsung.com;andrew.cmu.edu;samsung.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+0;0", + "aff_unique_norm": "Samsung Research America;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.samsung.com/us/;https://www.cmu.edu", + "aff_unique_abbr": "SRA;CMU", + "aff_campus_unique_index": "0;0;1+0;0", + "aff_campus_unique": "Mountain View;Pittsburgh", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.311", + "title": "Explicit Query Rewriting for Conversational Dense Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In a conversational search scenario, a query might be context-dependent because some words are referred to previous expressions or omitted. Previous works tackle the issue by either reformulating the query into a self-contained query (query rewriting) or learning a contextualized query embedding from the query context (context modelling). In this paper, we propose a model CRDR that can perform query rewriting and context modelling in a unified framework in which the query rewriting\u2019s supervision signals further enhance the context modelling. Instead of generating a new query, CRDR only performs necessary modifications on the original query, which improves both accuracy and efficiency of query rewriting. In the meantime, the query rewriting benefits the context modelling by explicitly highlighting relevant terms in the query context, which improves the quality of the learned contextualized query embedding. 
To verify the effectiveness of CRDR, we perform comprehensive experiments on TREC CAsT-19 and TREC CAsT-20 datasets, and the results show that our method outperforms all baseline models in terms of both quality of query rewriting and quality of context-aware ranking.", + "author": "Hongjin Qian; Zhicheng Dou", + "authorids": "/h/hongjin-qian/; /z/zhicheng-dou/", + "bibtex": "@inproceedings{qian-dou-2022-explicit,\n title = \"Explicit Query Rewriting for Conversational Dense Retrieval\",\n author = \"Qian, Hongjin and\n Dou, Zhicheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.311/\",\n doi = \"10.18653/v1/2022.emnlp-main.311\",\n pages = \"4725--4737\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.311.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.311/", + "pdf_size": 1136005, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16696926994462853776&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China", + "aff_domain": "ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.254", + "title": 
"Explicit Role Interaction Network for Event Argument Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Event argument extraction is a challenging subtask of event extraction, aiming to identify and assign roles to arguments under a certain event. Existing methods extract arguments of each role independently, ignoring the relationship between different roles. Such an approach hinders the model from learning explicit interactions between different roles to improve the performance of individual argument extraction. As a solution, we design a neural model that we refer to as the Explicit Role Interaction Network (ERIN) which allows for dynamically capturing the correlations between different argument roles within an event. Extensive experiments on the benchmark dataset ACE2005 demonstrate the superiority of our proposed model to existing approaches.", + "author": "Nan Ding; Chunming Hu; Kai Sun; Samuel Mensah; Richong Zhang", + "authorids": "/n/nan-ding/; /c/chunming-hu/; /k/kai-sun/; /s/samuel-mensah/; /r/richong-zhang/", + "bibtex": "@inproceedings{ding-etal-2022-explicit,\n title = \"Explicit Role Interaction Network for Event Argument Extraction\",\n author = \"Ding, Nan and\n Hu, Chunming and\n Sun, Kai and\n Mensah, Samuel and\n Zhang, Richong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.254/\",\n doi = \"10.18653/v1/2022.findings-emnlp.254\",\n pages = \"3475--3485\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.254.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.254/", + "pdf_size": 654809, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=13525525598650472697&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, Beihang University, Beijing, China; Department of Computer Science, University of Sheffield, UK; SKLSDE, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;sheffield.ac.uk;act.buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;sheffield.ac.uk;act.buaa.edu.cn", + "github": "https://github.com/bellytina/Explicit_Role_Interaction_Network", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;2;0+1", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Sheffield", + "aff_unique_dep": "SKLSDE;;Department of Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.sheffield.ac.uk", + "aff_unique_abbr": ";;Sheffield", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+0;0;1;0+0", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "2022.emnlp-main.268", + "title": "Exploiting Global and Local Hierarchies for Hierarchical Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Hierarchical text classification aims to leverage label hierarchy in multi-label text classification. Existing methods encode label hierarchy in a global view, where label hierarchy is treated as the static hierarchical structure containing all labels. Since global hierarchy is static and irrelevant to text samples, it makes these methods hard to exploit hierarchical information. Contrary to global hierarchy, local hierarchy as a structured labels hierarchy corresponding to each text sample. It is dynamic and relevant to text samples, which is ignored in previous methods. 
To exploit global and local hierarchies, we propose Hierarchy-guided BERT with Global and Local hierarchies (HBGL), which utilizes the large-scale parameters and prior language knowledge of BERT to model both global and local hierarchies. Moreover, HBGL avoids the intentional fusion of semantic and hierarchical modules by directly modeling semantic and hierarchical information with BERT. Compared with the state-of-the-art method HGCLR, our method achieves significant improvement on three benchmark datasets.", + "author": "Ting Jiang; Deqing Wang; Leilei Sun; Zhongzhi Chen; Fuzhen Zhuang; Qinghong Yang", + "authorids": "/t/ting-jiang/; /d/deqing-wang/; /l/leilei-sun/; /z/zhongzhi-chen/; /f/fuzhen-zhuang/; /q/qinghong-yang/", + "bibtex": "@inproceedings{jiang-etal-2022-exploiting,\n title = \"Exploiting Global and Local Hierarchies for Hierarchical Text Classification\",\n author = \"Jiang, Ting and\n Wang, Deqing and\n Sun, Leilei and\n Chen, Zhongzhi and\n Zhuang, Fuzhen and\n Yang, Qinghong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.268/\",\n doi = \"10.18653/v1/2022.emnlp-main.268\",\n pages = \"4030--4039\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.268.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.268/", + "pdf_size": 499374, + "gs_citation": 44, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16952356252820290987&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "SKLSDE, School of Computer, Beihang University, Beijing, China; SKLSDE, School of Computer, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, School of Computer, Beihang University, 
Beijing, China; School of Software, Beihang University, Beijing, China; SKLSDE, School of Computer, Beihang University, Beijing, China + Institute of Artificial Intelligence, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; School of Software, Beihang University, Beijing, China", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "http://github.com/kongds/HBGL", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;0;0+0+1;0", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory", + "aff_unique_dep": "School of Computer;", + "aff_unique_url": "http://www.buaa.edu.cn;", + "aff_unique_abbr": "BUAA;", + "aff_campus_unique_index": "0;0;0;0;0+0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+0;0;0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.13", + "title": "Exploiting In-Domain Bilingual Corpora for Zero-Shot Transfer Learning in NLU of Intra-Sentential Code-Switching Chatbot Interactions", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Code-switching (CS) is a very common phenomenon in regions with various co-existing languages. Since CS is such a frequent habit in informal communications, both spoken and written, it also arises naturally in Human-Machine Interactions. Therefore, in order for natural language understanding (NLU) not to be degraded, CS must be taken into account when developing chatbots. The co-existence of multiple languages in a single NLU model has become feasible with multilingual language representation models such as mBERT. 
In this paper, the efficacy of zero-shot cross-lingual transfer learning with mBERT for NLU is evaluated on a Basque-Spanish CS chatbot corpus, comparing the performance of NLU models trained using in-domain chatbot utterances in Basque and/or Spanish without CS. The results obtained indicate that training joint multi-intent classification and entity recognition models on both languages simultaneously achieves best performance, better capturing the CS patterns.", + "author": "Maia Aguirre; Manex Serras; Laura Garc\u00eda-sardi\u00f1a; Jacobo L\u00f3pez-fern\u00e1ndez; Ariane M\u00e9ndez; Arantza Del Pozo", + "authorids": "/m/maia-aguirre/; /m/manex-serras/; /l/laura-garcia-sardina/; /j/jacobo-lopez-fernandez/; /a/ariane-mendez/; /a/arantza-del-pozo/", + "bibtex": "@inproceedings{aguirre-etal-2022-exploiting,\n title = \"Exploiting In-Domain Bilingual Corpora for Zero-Shot Transfer Learning in {NLU} of Intra-Sentential Code-Switching Chatbot Interactions\",\n author = \"Aguirre, Maia and\n Serras, Manex and\n Garc{\\'i}a-sardi{\\~n}a, Laura and\n L{\\'o}pez-fern{\\'a}ndez, Jacobo and\n M{\\'e}ndez, Ariane and\n Del Pozo, Arantza\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.13/\",\n doi = \"10.18653/v1/2022.emnlp-industry.13\",\n pages = \"138--144\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.13.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.13/", + "pdf_size": 425148, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1259354393969579103&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 0, + "aff": "Vicomtech Foundation, Basque Research and Technology Alliance (BRTA); Vicomtech 
Foundation, Basque Research and Technology Alliance (BRTA); Vicomtech Foundation, Basque Research and Technology Alliance (BRTA); Vicomtech Foundation, Basque Research and Technology Alliance (BRTA); Vicomtech Foundation, Basque Research and Technology Alliance (BRTA); Vicomtech Foundation, Basque Research and Technology Alliance (BRTA)", + "aff_domain": "vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org", + "email": "vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org;vicomtech.org", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Vicomtech Foundation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.vicomtech.org", + "aff_unique_abbr": "Vicomtech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "2022.findings-emnlp.164", + "title": "Exploiting Labeled and Unlabeled Data via Transformer Fine-tuning for Peer-Review Score Prediction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic Peer-review Aspect Score Prediction (PASP) of academic papers can be a helpful assistant tool for both reviewers and authors. Most existing works on PASP utilize supervised learning techniques. However, the limited number of peer-review data deteriorates the performance of PASP. This paper presents a novel semi-supervised learning (SSL) method that incorporates the Transformer fine-tuning into the \u0393-model, a variant of the Ladder network, to leverage contextual features from unlabeled data. Backpropagation simultaneously minimizes the sum of supervised and unsupervised cost functions, avoiding the need for layer-wise pre-training. The experimental results show that our model outperforms the supervised and naive semi-supervised learning baselines. 
Our source codes are available online.", + "author": "Panitan Muangkammuen; Fumiyo Fukumoto; Jiyi Li; Yoshimi Suzuki", + "authorids": "/p/panitan-muangkammuen/; /f/fumiyo-fukumoto/; /j/jiyi-li/; /y/yoshimi-suzuki/", + "bibtex": "@inproceedings{muangkammuen-etal-2022-exploiting,\n title = \"Exploiting Labeled and Unlabeled Data via Transformer Fine-tuning for Peer-Review Score Prediction\",\n author = \"Muangkammuen, Panitan and\n Fukumoto, Fumiyo and\n Li, Jiyi and\n Suzuki, Yoshimi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.164/\",\n doi = \"10.18653/v1/2022.findings-emnlp.164\",\n pages = \"2233--2240\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.164.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.164/", + "pdf_size": 322867, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4310003185521209586&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Integrated Graduate School of Medicine, Engineering, and Agricultural Sciences, Faculty of Engineering, University of Yamanashi, Kofu, Japan; Integrated Graduate School of Medicine, Engineering, and Agricultural Sciences, Faculty of Engineering, University of Yamanashi, Kofu, Japan; Integrated Graduate School of Medicine, Engineering, and Agricultural Sciences, Faculty of Engineering, University of Yamanashi, Kofu, Japan; Integrated Graduate School of Medicine, Engineering, and Agricultural Sciences, Faculty of Engineering, University of Yamanashi, Kofu, Japan", + "aff_domain": "yamanashi.ac.jp;yamanashi.ac.jp;yamanashi.ac.jp;yamanashi.ac.jp", + "email": "yamanashi.ac.jp;yamanashi.ac.jp;yamanashi.ac.jp;yamanashi.ac.jp", + 
"github": "https://github.com/panitan-m/gamma_trans", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Yamanashi", + "aff_unique_dep": "Faculty of Engineering", + "aff_unique_url": "https://www.u-yamanashi.ac.jp", + "aff_unique_abbr": "UoY", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Kofu", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.157", + "title": "Exploiting domain-slot related keywords description for Few-Shot Cross-Domain Dialogue State Tracking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Collecting dialogue data with domain-slot-value labels for dialogue state tracking (DST) could be a costly process. In this paper, we propose a novel framework based on domain-slot related description to tackle the challenge of few-shot cross-domain DST. Specifically, we design an extraction module to extract domain-slot related verbs and nouns in the dialogue. Then, we integrates them into the description, which aims to prompt the model to identify the slot information. Furthermore, we introduce a random sampling strategy to improve the domain generalization ability of the model. We utilize a pre-trained model to encode contexts and description and generates answers with an auto-regressive manner. 
Experimental results show that our approaches substantially outperform the existing few-shot DST methods on MultiWOZ and gain strong improvements on the slot accuracy comparing to existing slot description methods.", + "author": "Gao Qixiang; Guanting Dong; Yutao Mou; Liwen Wang; Chen Zeng; Daichi Guo; Mingyang Sun; Weiran Xu", + "authorids": "/g/gao-qixiang/; /g/guanting-dong/; /y/yutao-mou/; /l/liwen-wang/; /c/chen-zeng/; /d/daichi-guo/; /m/mingyang-sun/; /w/weiran-xu/", + "bibtex": "@inproceedings{qixiang-etal-2022-exploiting,\n title = \"Exploiting domain-slot related keywords description for Few-Shot Cross-Domain Dialogue State Tracking\",\n author = \"Qixiang, Gao and\n Dong, Guanting and\n Mou, Yutao and\n Wang, Liwen and\n Zeng, Chen and\n Guo, Daichi and\n Sun, Mingyang and\n Xu, Weiran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.157/\",\n doi = \"10.18653/v1/2022.emnlp-main.157\",\n pages = \"2460--2465\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.157.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.157/", + "pdf_size": 212695, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1109657719778354709&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications", + 
"aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Beijing University of Posts and Telecommunications", + "aff_unique_dep": "", + "aff_unique_url": "http://www.bupt.edu.cn/", + "aff_unique_abbr": "BUPT", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.447", + "title": "Exploration of the Usage of Color Terms by Color-blind Participants in Online Discussion Platforms", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prominent questions about the role of sensory vs. linguistic input in the way we acquire and use language have been extensively studied in the psycholinguistic literature. However, the relative effect of various factors in a person\u2019s overall experience on their linguistic system remains unclear. We study this question by making a step forward towards a better understanding of the conceptual perception of colors by color-blind individuals, as reflected in their spontaneous linguistic productions. Using a novel and carefully curated dataset, we show that red-green color-blind speakers use the \u201cred\u201d and \u201cgreen\u201d color terms in less predictable contexts, and in linguistic environments evoking mental image to a lower extent, when compared to their normal-sighted counterparts. 
These findings shed some new and interesting light on the role of sensory experience on our linguistic system.", + "author": "Ella Rabinovich; Boaz Carmeli", + "authorids": "/e/ella-rabinovich/; /b/boaz-carmeli/", + "bibtex": "@inproceedings{rabinovich-carmeli-2022-exploration,\n title = \"Exploration of the Usage of Color Terms by Color-blind Participants in Online Discussion Platforms\",\n author = \"Rabinovich, Ella and\n Carmeli, Boaz\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.447/\",\n doi = \"10.18653/v1/2022.emnlp-main.447\",\n pages = \"6673--6685\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.447.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.447/", + "pdf_size": 1186919, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:tkvODn4HLUQJ:scholar.google.com/&scioq=Exploration+of+the+Usage+of+Color+Terms+by+Color-blind+Participants+in+Online+Discussion+Platforms&hl=en&as_sdt=0,44", + "gs_version_total": 5, + "aff": "IBM Research; IBM Research + Technion \u2013 Israel Institute of Technology", + "aff_domain": "ibm.com;il.ibm.com", + "email": "ibm.com;il.ibm.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "IBM;Technion \u2013 Israel Institute of Technology", + "aff_unique_dep": "IBM Research;", + "aff_unique_url": "https://www.ibm.com/research;https://www.technion.ac.il/en/", + "aff_unique_abbr": "IBM;Technion", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "United States;Israel" + }, + { + "id": "2022.findings-emnlp.453", + "title": 
"Explore Unsupervised Structures in Pretrained Models for Relation Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Syntactic trees have been widely applied in relation extraction (RE). However, since parsing qualities are not stable on different text domains and a pre-defined grammar may not well fit the target relation schema, the introduction of syntactic structures sometimes fails to improve RE performances consistently. In this work, we study RE models with various unsupervised structures mined from pre-trained language models (e.g., BERT). We show that, similar to syntactic trees, unsupervised structures are quite informative for RE task: they are able to obtain competitive (even the best) performance scores on benchmark RE datasets (ACE05, WebNLG, SciERC). We also conduct detailed analyses on their abilities of adapting new RE domains and influence of noise links in those structures. The results suggest that unsupervised structures are reasonable alternatives of commonly used syntactic structures in relation extraction models.", + "author": "Xi Yang; Tao Ji; Yuanbin Wu", + "authorids": "/x/xi-yang/; /t/tao-ji/; /y/yuanbin-wu/", + "bibtex": "@inproceedings{yang-etal-2022-explore,\n title = \"Explore Unsupervised Structures in Pretrained Models for Relation Extraction\",\n author = \"Yang, Xi and\n Ji, Tao and\n Wu, Yuanbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.453/\",\n doi = \"10.18653/v1/2022.findings-emnlp.453\",\n pages = \"6103--6117\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.453.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.453/", + "pdf_size": 746752, + 
"gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:oSHHGGULDeIJ:scholar.google.com/&scioq=Explore+Unsupervised+Structures+in+Pretrained+Models+for+Relation+Extraction&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff": "School of Computer Science and Technology, East China Normal University; School of Computer Science and Technology, East China Normal University; School of Computer Science and Technology, East China Normal University", + "aff_domain": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;stu.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "https://github.com/xyang41/structures-for-RE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "East China Normal University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "http://www.ecnu.edu.cn", + "aff_unique_abbr": "ECNU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.92", + "title": "Exploring Compositional Image Retrieval with Hybrid Compositional Learning and Heuristic Negative Mining", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Compositional image retrieval (CIR) is a challenging retrieval task, where the query is composed of a reference image and a modification text, and the target is another image reflecting the modification to the reference image. Due to the great success of the pre-trained vision-and-language model CLIP and its favorable applicability to large-scale retrieval tasks, we propose a CIR model HyCoLe-HNM with CLIP as the backbone. In HyCoLe-HNM, we follow the contrastive pre-training method of CLIP to perform cross-modal representation learning. On this basis, we propose a hybrid compositional learning mechanism, which includes both image compositional learning and text compositional learning. 
In hybrid compositional learning, we borrow a gated fusion mechanism from a question answering model to perform compositional fusion, and propose a heuristic negative mining method to filter negative samples. Privileged information in the form of image-related texts is utilized in cross-modal representation learning and hybrid compositional learning. Experimental results show that HyCoLe-HNM achieves state-of-the-art performance on three CIR datasets, namely FashionIQ, Fashion200K, and MIT-States.", + "author": "Chao Wang; Ehsan Nezhadarya; Tanmana Sadhu; Shengdong Zhang", + "authorids": "/c/chao-wang/; /e/ehsan-nezhadarya/; /t/tanmana-sadhu/; /s/shengdong-zhang/", + "bibtex": "@inproceedings{wang-etal-2022-exploring,\n title = \"Exploring Compositional Image Retrieval with Hybrid Compositional Learning and Heuristic Negative Mining\",\n author = \"Wang, Chao and\n Nezhadarya, Ehsan and\n Sadhu, Tanmana and\n Zhang, Shengdong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.92/\",\n doi = \"10.18653/v1/2022.findings-emnlp.92\",\n pages = \"1273--1285\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.92.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.92/", + "pdf_size": 1438378, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9893196758246171900&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.672", + "title": "Exploring Document-Level Literary Machine Translation with Parallel Paragraphs from World Literature", + "track": "main", + "status": "Main", 
+ "award": false, + "abstract": "Literary translation is a culturally significant task, but it is bottlenecked by the small number of qualified literary translators relative to the many untranslated works published around the world. Machine translation (MT) holds potential to complement the work of human translators by improving both training procedures and their overall efficiency. Literary translation is less constrained than more traditional MT settings since translators must balance meaning equivalence, readability, and critical interpretability in the target language. This property, along with the complex discourse-level context present in literary texts, also makes literary MT more challenging to computationally model and evaluate. To explore this task, we collect a dataset (Par3) of non-English language novels in the public domain, each aligned at the paragraph level to both human and automatic English translations. Using Par3, we discover that expert literary translators prefer reference human translations over machine-translated paragraphs at a rate of 84%, while state-of-the-art automatic MT metrics do not correlate with those preferences. The experts note that MT outputs contain not only mistranslations, but also discourse-disrupting errors and stylistic inconsistencies. To address these problems, we train a post-editing model whose output is preferred over normal MT output at a rate of 69% by experts. 
We publicly release Par3 to spur future research into literary MT.", + "author": "Katherine Thai; Marzena Karpinska; Kalpesh Krishna; Bill Ray; Moira Inghilleri; John Wieting; Mohit Iyyer", + "authorids": "/k/katherine-thai/; /m/marzena-karpinska/; /k/kalpesh-krishna/; /b/bill-ray/; /m/moira-inghilleri/; /j/john-wieting/; /m/mohit-iyyer/", + "bibtex": "@inproceedings{thai-etal-2022-exploring,\n title = \"Exploring Document-Level Literary Machine Translation with Parallel Paragraphs from World Literature\",\n author = \"Thai, Katherine and\n Karpinska, Marzena and\n Krishna, Kalpesh and\n Ray, Bill and\n Inghilleri, Moira and\n Wieting, John and\n Iyyer, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.672/\",\n doi = \"10.18653/v1/2022.emnlp-main.672\",\n pages = \"9882--9902\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.672.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.672/", + "pdf_size": 1154764, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8390042388683295583&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Manning College of Information and Computer Sciences, UMass Amherst\u2662; Department of Languages, Literatures, and Cultures, UMass Amherst\u2660; Google Research\u2663; Manning College of Information and Computer Sciences, UMass Amherst\u2662; Department of Languages, Literatures, and Cultures, UMass Amherst\u2660; Google Research\u2663; Manning College of Information and Computer Sciences, UMass Amherst\u2662", + "aff_domain": "cs.umass.edu;cs.umass.edu;cs.umass.edu;cs.umass.edu;complit.umass.edu;google.com; ", + "email": 
"cs.umass.edu;cs.umass.edu;cs.umass.edu;cs.umass.edu;complit.umass.edu;google.com; ", + "github": "https://github.com/katherinethai/par3/", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;1;0", + "aff_unique_norm": "University of Massachusetts Amherst;Google", + "aff_unique_dep": "Manning College of Information and Computer Sciences;Google Research", + "aff_unique_url": "https://www.umass.edu;https://research.google", + "aff_unique_abbr": "UMass Amherst;Google Research", + "aff_campus_unique_index": "0;0;1;0;0;1;0", + "aff_campus_unique": "Amherst;Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.640", + "title": "Exploring Dual Encoder Architectures for Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dual encoders have been used for question-answering (QA) and information retrieval (IR) tasks with good results. There are two major types of dual encoders, Siamese Dual Encoders (SDE), with parameters shared across two encoders, and Asymmetric Dual Encoder (ADE), with two distinctly parameterized encoders. In this work, we explore the dual encoder architectures for QA retrieval tasks. By evaluating on MS MARCO, open domain NQ, and the MultiReQA benchmarks, we show that SDE performs significantly better than ADE. We further propose three different improved versions of ADEs. 
Based on the evaluation of QA retrieval tasks and direct analysis of the embeddings, we demonstrate that sharing parameters in projection layers would enable ADEs to perform competitively with SDEs.", + "author": "Zhe Dong; Jianmo Ni; Dan Bikel; Enrique Alfonseca; Yuan Wang; Chen Qu; Imed Zitouni", + "authorids": "/z/zhe-dong/; /j/jianmo-ni/; /d/daniel-m-bikel/; /e/enrique-alfonseca/; /y/yuan-wang/; /c/chen-qu/; /i/imed-zitouni/", + "bibtex": "@inproceedings{dong-etal-2022-exploring,\n title = \"Exploring Dual Encoder Architectures for Question Answering\",\n author = \"Dong, Zhe and\n Ni, Jianmo and\n Bikel, Dan and\n Alfonseca, Enrique and\n Wang, Yuan and\n Qu, Chen and\n Zitouni, Imed\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.640/\",\n doi = \"10.18653/v1/2022.emnlp-main.640\",\n pages = \"9414--9419\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.640.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.640/", + "pdf_size": 605738, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7851417985659630951&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Google Inc; Google Inc; Google Inc; Google Inc; Google Inc; Google Inc; Google Inc", + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "https://sites.google.com/view/explore-dual-encoder-architect", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "", + "aff_unique_url": "https://www.google.com", + 
"aff_unique_abbr": "Google", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.292", + "title": "Exploring Logographic Image for Chinese Aspect-based Sentiment Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In logographic languages like Chinese, word meanings are constructed using specific character formations, which can help to disambiguate word senses and are beneficial for sentiment classification. However, such knowledge is rarely explored in previous sentiment analysis methods. In this paper, we focus on exploring the logographic information for aspect-based sentiment classification in Chinese text. Specifically, we employ a logographic image to capture an internal morphological structure from the character sequence. The logographic image is also used to learn the external relations among context and aspect words. Furthermore, we propose a multimodal language model to explicitly incorporate a logographic image with review text for aspect-based sentiment classification in Chinese. Experimental results show that our method brings substantial performance improvement over strong baselines. 
The results also indicate that the logographic image is very important for exploring the internal structure and external relations from the character sequence.", + "author": "Xiabing Zhou; Renjie Feng; Xiaotong Jiang; Zhongqing Wang", + "authorids": "/x/xiabing-zhou/; /r/renjie-feng/; /x/xiaotong-jiang/; /z/zhongqing-wang/", + "bibtex": "@inproceedings{zhou-etal-2022-exploring,\n title = \"Exploring Logographic Image for {C}hinese Aspect-based Sentiment Classification\",\n author = \"Zhou, Xiabing and\n Feng, Renjie and\n Jiang, Xiaotong and\n Wang, Zhongqing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.292/\",\n doi = \"10.18653/v1/2022.findings-emnlp.292\",\n pages = \"3963--3972\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.292.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.292/", + "pdf_size": 1342874, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:t0HfJSb9o6gJ:scholar.google.com/&scioq=Exploring+Logographic+Image+for+Chinese+Aspect-based+Sentiment+Classification&hl=en&as_sdt=0,44", + "gs_version_total": 2, + "aff": "Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China", + "aff_domain": "suda.edu.cn;qq.com;outlook.com;suda.edu.cn", + "email": "suda.edu.cn;qq.com;outlook.com;suda.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Soochow University", + "aff_unique_dep": "Natural Language Processing 
Lab", + "aff_unique_url": "http://www.soochow.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Suzhou", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.469", + "title": "Exploring Methods for Building Dialects-Mandarin Code-Mixing Corpora: A Case Study in Taiwanese Hokkien", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In natural language processing (NLP), code-mixing (CM) is a challenging task, especially when the mixed languages include dialects. In Southeast Asian countries such as Singapore, Indonesia, and Malaysia, Hokkien-Mandarin is the most widespread code-mixed language pair among Chinese immigrants, and it is also common in Taiwan. However, dialects such as Hokkien often have a scarcity of resources and the lack of an official writing system, limiting the development of dialect CM research. In this paper, we propose a method to construct a Hokkien-Mandarin CM dataset to mitigate the limitation, overcome the morphological issue under the Sino-Tibetan language family, and offer an efficient Hokkien word segmentation method through a linguistics-based toolkit. Furthermore, we use our proposed dataset and employ transfer learning to train the XLM (cross-lingual language model) for translation tasks. To fit the code-mixing scenario, we adapt XLM slightly. 
We found that by using linguistic knowledge, rules, and language tags, the model produces good results on CM data translation while maintaining monolingual translation quality.", + "author": "Sin-En Lu; Bo-Han Lu; Chao-Yi Lu; Richard Tzong-Han Tsai", + "authorids": "/s/sin-en-lu/; /b/bo-han-lu/; /c/chao-yi-lu/; /r/richard-tzong-han-tsai/", + "bibtex": "@inproceedings{lu-etal-2022-exploring,\n title = \"Exploring Methods for Building Dialects-{M}andarin Code-Mixing Corpora: A Case Study in {T}aiwanese Hokkien\",\n author = \"Lu, Sin-En and\n Lu, Bo-Han and\n Lu, Chao-Yi and\n Tsai, Richard Tzong-Han\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.469/\",\n doi = \"10.18653/v1/2022.findings-emnlp.469\",\n pages = \"6287--6305\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.469.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.469/", + "pdf_size": 644437, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3217197312733795758&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Information Engineering, National Central University, Taiwan+Center for GIS, Research Center for Humanities and Social Sciences, Academia Sinica, Taiwan; Department of Computer Science and Information Engineering, National Central University, Taiwan+Center for GIS, Research Center for Humanities and Social Sciences, Academia Sinica, Taiwan; Purdue University, USA; Department of Computer Science and Information Engineering, National Central University, Taiwan+Purdue University, USA+Center for GIS, Research Center for Humanities and Social Sciences, Academia Sinica, Taiwan", + 
"aff_domain": "gmail.com;cc.ncu.edu.tw;purdue.edu;g.ncu.edu.tw", + "email": "gmail.com;cc.ncu.edu.tw;purdue.edu;g.ncu.edu.tw", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;0+2+1", + "aff_unique_norm": "National Central University;Academia Sinica;Purdue University", + "aff_unique_dep": "Department of Computer Science and Information Engineering;Center for GIS, Research Center for Humanities and Social Sciences;", + "aff_unique_url": "https://www.ncu.edu.tw;https://www.sinica.edu.tw;https://www.purdue.edu", + "aff_unique_abbr": "NCU;AS;Purdue", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0+1+0", + "aff_country_unique": "Taiwan, China;United States" + }, + { + "id": "2022.emnlp-main.451", + "title": "Exploring Mode Connectivity for Pre-trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent years have witnessed the prevalent application of pre-trained language models (PLMs) in NLP. From the perspective of parameter space, PLMs provide generic initialization, starting from which high-performance minima could be found. Although plenty of works have studied how to effectively and efficiently adapt PLMs to high-performance minima, little is known about the connection of various minima reached under different adaptation configurations. In this paper, we investigate the geometric connections of different minima through the lens of mode connectivity, which measures whether two minima can be connected with a low-loss path. We conduct empirical analyses to investigate three questions: (1) how could hyperparameters, specific tuning methods, and training data affect PLM\u2019s mode connectivity? (2) How does mode connectivity change during pre-training? (3) How does the PLM\u2019s task knowledge change along the path connecting two minima? 
In general, exploring the mode connectivity of PLMs conduces to understanding the geometric connection of different minima, which may help us fathom the inner workings of PLM downstream adaptation. The codes are publicly available at https://github.com/thunlp/Mode-Connectivity-PLM.", + "author": "Yujia Qin; Cheng Qian; Jing Yi; Weize Chen; Yankai Lin; Xu Han; Zhiyuan Liu; Maosong Sun; Jie Zhou", + "authorids": "/y/yujia-qin/; /c/cheng-qian/; /j/jing-yi/; /w/weize-chen/; /y/yankai-lin/; /x/xu-han/; /z/zhiyuan-liu/; /m/maosong-sun/; /j/jie-zhou/", + "bibtex": "@inproceedings{qin-etal-2022-exploring,\n title = \"Exploring Mode Connectivity for Pre-trained Language Models\",\n author = \"Qin, Yujia and\n Qian, Cheng and\n Yi, Jing and\n Chen, Weize and\n Lin, Yankai and\n Han, Xu and\n Liu, Zhiyuan and\n Sun, Maosong and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.451/\",\n doi = \"10.18653/v1/2022.emnlp-main.451\",\n pages = \"6726--6746\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.451.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.451/", + "pdf_size": 1756131, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11042705686400385844&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing+Beijing Key Laboratory of Big Data Management and Analysis 
Methods, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing+International Innovation Center of Tsinghua University, Shanghai+Quan Cheng Laboratory; NLP Group, DCST, IAI, BNRIST, Tsinghua University, Beijing+International Innovation Center of Tsinghua University, Shanghai+Quan Cheng Laboratory; Pattern Recognition Center, WeChat AI, Tencent Inc.", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ", + "github": "https://github.com/thunlp/Mode-Connectivity-PLM", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;1+2;0;0+0+3;0+0+3;4", + "aff_unique_norm": "Tsinghua University;Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods;Quan Cheng Laboratory;Tencent Inc.", + "aff_unique_dep": "NLP Group;Gaoling School of Artificial Intelligence;Big Data Management and Analysis;;Pattern Recognition Center, WeChat AI", + "aff_unique_url": "https://www.tsinghua.edu.cn;http://www.ruc.edu.cn;;;https://www.tencent.com", + "aff_unique_abbr": "THU;RUC;;;Tencent", + "aff_campus_unique_index": "0;0;0;0;0;0;0+2;0+2", + "aff_campus_unique": "Beijing;;Shanghai", + "aff_country_unique_index": "0;0;0;0;0+0;0;0+0;0+0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.findings-emnlp.198", + "title": "Exploring Predictive Uncertainty and Calibration in NLP: A Study on the Impact of Method & Data Scarcity", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We investigate the problem of determining the predictive confidence (or, conversely, uncertainty) of a neural classifier through the lens of low-resource languages. 
By training models on sub-sampled datasets in three different languages, we assess the quality of estimates from a wide array of approaches and their dependence on the amount of available data. We find that while approaches based on pre-trained models and ensembles achieve the best results overall, the quality of uncertainty estimates can surprisingly suffer with more data. We also perform a qualitative analysis of uncertainties on sequences, discovering that a model\u2019s total uncertainty seems to be influenced to a large degree by its data uncertainty, not model uncertainty. All model implementations are open-sourced in a software package.", + "author": "Dennis Ulmer; Jes Frellsen; Christian Hardmeier", + "authorids": "/d/dennis-ulmer/; /j/jes-frellsen/; /c/christian-hardmeier/", + "bibtex": "@inproceedings{ulmer-etal-2022-exploring,\n title = \"Exploring Predictive Uncertainty and Calibration in {NLP}: A Study on the Impact of Method {\\&} Data Scarcity\",\n author = \"Ulmer, Dennis and\n Frellsen, Jes and\n Hardmeier, Christian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.198/\",\n doi = \"10.18653/v1/2022.findings-emnlp.198\",\n pages = \"2707--2735\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.198.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.198/", + "pdf_size": 1443141, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4570412726369769672&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, IT University of Copenhagen\u263c; Department of Applied Mathematics & Computer Science, Technical University of Denmark\u2642; Department 
of Computer Science, IT University of Copenhagen\u263c", + "aff_domain": "mailbox.org; ; ", + "email": "mailbox.org; ; ", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "IT University of Copenhagen;Technical University of Denmark", + "aff_unique_dep": "Department of Computer Science;Department of Applied Mathematics & Computer Science", + "aff_unique_url": "https://itu.dk;https://www.dtu.dk", + "aff_unique_abbr": "ITU;DTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Denmark" + }, + { + "id": "2022.emnlp-main.327", + "title": "Exploring Representation-level Augmentation for Code Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Code search, which aims at retrieving the most relevant code fragment for a given natural language query, is a common activity in software development practice. Recently, contrastive learning is widely used in code search research, where many data augmentation approaches for source code (e.g., semantic-preserving program transformation) are proposed to learn better representations. However, these augmentations are at the raw-data level, which requires additional code analysis in the preprocessing stage and additional training cost in the training stage. In this paper, we explore augmentation methods that augment data (both code and query) at representation level which does not require additional data processing and training, and based on this we propose a general format of representation-level augmentation that unifies existing methods. Then, we propose three new augmentation methods (linear extrapolation, binary interpolation, and Gaussian scaling) based on the general format. Furthermore, we theoretically analyze the advantages of the proposed augmentation methods over traditional contrastive learning methods on code search. 
We experimentally evaluate the proposed representation-level augmentation methods with state-of-the-art code search models on a large-scale public dataset consisting of six programming languages. The experimental results show that our approach can consistently boost the performance of the studied code search models.", + "author": "Haochen Li; Chunyan Miao; Cyril Leung; Yanxian Huang; Yuan Huang; Hongyu Zhang; Yanlin Wang", + "authorids": "/h/haochen-li/; /c/chunyan-miao/; /c/cyril-leung/; /y/yanxian-huang/; /y/yuan-huang/; /h/hongyu-zhang/; /y/yanlin-wang/", + "bibtex": "@inproceedings{li-etal-2022-exploring-representation,\n title = \"Exploring Representation-level Augmentation for Code Search\",\n author = \"Li, Haochen and\n Miao, Chunyan and\n Leung, Cyril and\n Huang, Yanxian and\n Huang, Yuan and\n Zhang, Hongyu and\n Wang, Yanlin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.327/\",\n doi = \"10.18653/v1/2022.emnlp-main.327\",\n pages = \"4924--4936\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.327.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.327/", + "pdf_size": 365280, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18442996541183132487&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/Alex-HaochenLi/RACS", + "project": "", + "author_num": 7 + }, + { + "id": "2022.findings-emnlp.441", + "title": "Exploring The Landscape of Distributional Robustness for Question Answering Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We conduct a 
large empirical evaluation to investigate the landscape of distributional robustness in question answering. Our investigation spans over 350 models and 16 question answering datasets, including a diverse set of architectures, model sizes, and adaptation methods (e.g., fine-tuning, adapter tuning, in-context learning, etc.). We find that, in many cases, model variations do not affect robustness and in-distribution performance alone determines out-of-distribution performance. Moreover, our findings indicate that i) zero-shot and in-context learning methods are more robust to distribution shifts than fully fine-tuned models; ii) few-shot prompt fine-tuned models exhibit better robustness than few-shot fine-tuned span prediction models; iii) parameter-efficient and robustness enhancing training methods provide no significant robustness improvements. In addition, we publicly release all evaluations to encourage researchers to further analyze robustness trends for question answering models.", + "author": "Anas Awadalla; Mitchell Wortsman; Gabriel Ilharco; Sewon Min; Ian Magnusson; Hannaneh Hajishirzi; Ludwig Schmidt", + "authorids": "/a/anas-awadalla/; /m/mitchell-wortsman/; /g/gabriel-ilharco/; /s/sewon-min/; /i/ian-magnusson/; /h/hannaneh-hajishirzi/; /l/ludwig-schmidt/", + "bibtex": "@inproceedings{awadalla-etal-2022-exploring,\n title = \"Exploring The Landscape of Distributional Robustness for Question Answering Models\",\n author = \"Awadalla, Anas and\n Wortsman, Mitchell and\n Ilharco, Gabriel and\n Min, Sewon and\n Magnusson, Ian and\n Hajishirzi, Hannaneh and\n Schmidt, Ludwig\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.441/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.441\",\n pages = \"5971--5987\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.441.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.441/", + "pdf_size": 919007, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11100713009084799788&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 4, + "aff": "University of Washington; University of Washington; University of Washington; University of Washington; Allen Institute for AI; University of Washington + Allen Institute for AI; University of Washington + Allen Institute for AI", + "aff_domain": "cs.washington.edu; ; ; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;0+1;0+1", + "aff_unique_norm": "University of Washington;Allen Institute for AI", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.washington.edu;https://allenai.org", + "aff_unique_abbr": "UW;AI2", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.237", + "title": "Exploring the Secrets Behind the Learning Difficulty of Meaning Representations for Semantic Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Previous research has shown that the design of Meaning Representation (MR) greatly influences the final model performance of a neural semantic parser. Therefore, designing a good MR is a long-term goal for semantic parsing. However, it is still an art as there is no quantitative indicator that can tell us which MR among a set of candidates may have the best final model performance. In practice, in order toselect an MR design, researchers often have to go through the whole training-testing process for all design candidates, and the process often costs a lot. 
In this paper, we propose a data-aware metric called ISS (denoting incremental structural stability) of MRs, and demonstrate that ISS is highly correlated with the final performance. The finding shows that ISS can be used as an indicator for MR design to avoid the costly training-testing process.", + "author": "Zhenwen Li; Jiaqi Guo; Qian Liu; Jian-Guang Lou; Tao Xie", + "authorids": "/z/zhenwen-li/; /j/jiaqi-guo/; /q/qian-liu/; /j/jian-guang-lou/; /t/tao-xie/", + "bibtex": "@inproceedings{li-etal-2022-exploring-secrets,\n title = \"Exploring the Secrets Behind the Learning Difficulty of Meaning Representations for Semantic Parsing\",\n author = \"Li, Zhenwen and\n Guo, Jiaqi and\n Liu, Qian and\n Lou, Jian-Guang and\n Xie, Tao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.237/\",\n doi = \"10.18653/v1/2022.emnlp-main.237\",\n pages = \"3616--3625\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.237.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.237/", + "pdf_size": 370878, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15219184177635954595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Peking University, Beijing, China; Xi'an Jiaotong University, Xi'an, China; Sea AI Lab, Singapore; Microsoft Research Asia, Beijing, China; Peking University, Beijing, China", + "aff_domain": "pku.edu.cn;stu.xjtu.edu.cn;sea.com;microsoft.com;pku.edu.cn", + "email": "pku.edu.cn;stu.xjtu.edu.cn;sea.com;microsoft.com;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Peking University;Xi'an Jiaotong University;Sea AI 
Lab;Microsoft Research Asia", + "aff_unique_dep": ";;;Research", + "aff_unique_url": "http://www.pku.edu.cn;http://www.xjtu.edu.cn;;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "Peking U;XJTU;;MSRA", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Beijing;Xi'an;", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.findings-emnlp.47", + "title": "Expose Backdoors on the Way: A Feature-Based Efficient Defense against Textual Backdoor Attacks", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural language processing (NLP) models are known to be vulnerable to backdoor attacks, which poses a newly arisen threat to NLP models. Prior online backdoor defense methods for NLP models only focus on the anomalies at either the input or output level, still suffering from fragility to adaptive attacks and high computational cost. In this work, we take the first step to investigate the unconcealment of textual poisoned samples at the intermediate-feature level and propose a feature-based efficient online defense method. Through extensive experiments on existing attacking methods, we find that the poisoned samples are far away from clean samples in the intermediate feature space of a poisoned NLP model. Motivated by this observation, we devise a distance-based anomaly score (DAN) to distinguish poisoned samples from clean samples at the feature level. Experiments on sentiment analysis and offense detection tasks demonstrate the superiority of DAN, as it substantially surpasses existing online defense methods in terms of defending performance and enjoys lower inference costs. Moreover, we show that DAN is also resistant to adaptive attacks based on feature-level regularization. 
Our code is available at https://github.com/lancopku/DAN.", + "author": "Sishuo Chen; Wenkai Yang; Zhiyuan Zhang; Xiaohan Bi; Xu Sun", + "authorids": "/s/sishuo-chen/; /w/wenkai-yang/; /z/zhiyuan-zhang/; /x/xiaohan-bi/; /x/xu-sun/", + "bibtex": "@inproceedings{chen-etal-2022-expose,\n title = \"Expose Backdoors on the Way: A Feature-Based Efficient Defense against Textual Backdoor Attacks\",\n author = \"Chen, Sishuo and\n Yang, Wenkai and\n Zhang, Zhiyuan and\n Bi, Xiaohan and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.47/\",\n doi = \"10.18653/v1/2022.findings-emnlp.47\",\n pages = \"668--683\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.47.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.47/", + "pdf_size": 524972, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15303901423064859042&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 4, + "aff": "Center for Data Science, Peking University; Center for Data Science, Peking University; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University; Center for Data Science, Peking University; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;stu.pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "https://github.com/lancopku/DAN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Center for Data Science", + "aff_unique_url": "http://www.pku.edu.cn", + 
"aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.604", + "title": "Extending Logic Explained Networks to Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, Logic Explained Networks (LENs) have been proposed as explainable-by-design neural models providing logic explanations for their predictions.However, these models have only been applied to vision and tabular data, and they mostly favour the generation of global explanations, while local ones tend to be noisy and verbose.For these reasons, we propose LENp, improving local explanations by perturbing input words, and we test it on text classification. Our results show that (i) LENp provides better local explanations than LIME in terms of sensitivity and faithfulness, and (ii) its logic explanations are more useful and user-friendly than the feature scoring provided by LIME as attested by a human survey.", + "author": "Rishabh Jain; Gabriele Ciravegna; Pietro Barbiero; Francesco Giannini; Davide Buffelli; Pietro Lio", + "authorids": "/r/rishabh-jain/; /g/gabriele-ciravegna/; /p/pietro-barbiero/; /f/francesco-giannini/; /d/davide-buffelli/; /p/pietro-lio/", + "bibtex": "@inproceedings{jain-etal-2022-extending,\n title = \"Extending Logic Explained Networks to Text Classification\",\n author = \"Jain, Rishabh and\n Ciravegna, Gabriele and\n Barbiero, Pietro and\n Giannini, Francesco and\n Buffelli, Davide and\n Lio, Pietro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.604/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.604\",\n pages = \"8838--8857\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.604.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.604/", + "pdf_size": 800067, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3128097653269363618&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 15, + "aff": "University of Cambridge, Cambridge, UK; Universit\u00e9 C\u00f4te d\u2019Azur, Inria, CNRS, I3S, Maasai, Nice, France; University of Cambridge, Cambridge, UK; University of Siena, Siena, Italy; University of Cambridge, Cambridge, UK; University of Cambridge, Cambridge, UK", + "aff_domain": "cam.ac.uk; ; ; ; ; ", + "email": "cam.ac.uk; ; ; ; ; ", + "github": "", + "project": "https://pypi.org/project/torch-explain/8838", + "author_num": 6, + "aff_unique_index": "0;1;0;2;0;0", + "aff_unique_norm": "University of Cambridge;Universit\u00e9 C\u00f4te d\u2019Azur;University of Siena", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cam.ac.uk;https://www.univ-cotedazur.fr;https://www.unisi.it", + "aff_unique_abbr": "Cambridge;UCA;UniSi", + "aff_campus_unique_index": "0;1;0;2;0;0", + "aff_campus_unique": "Cambridge;Nice;Siena", + "aff_country_unique_index": "0;1;0;2;0;0", + "aff_country_unique": "United Kingdom;France;Italy" + }, + { + "id": "2022.emnlp-main.518", + "title": "Extending Phrase Grounding with Pronouns in Visual Dialogues", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conventional phrase grounding aims to localize noun phrases mentioned in a given caption to their corresponding image regions, which has achieved great success recently. Apparently, sole noun phrase grounding is not enough for cross-modal visual language understanding. Here we extend the task by considering pronouns as well. First, we construct a dataset of phrase grounding with both noun phrases and pronouns to image regions. 
Based on the dataset, we test the performance of phrase grounding by using a state-of-the-art literature model of this line. Then, we enhance the baseline grounding model with coreference information which should help our task potentially, modeling the coreference structures with graph convolutional networks. Experiments on our dataset, interestingly, show that pronouns are easier to ground than noun phrases, where the possible reason might be that these pronouns are much less ambiguous. Additionally, our final model with coreference information can significantly boost the grounding performance of both noun phrases and pronouns.", + "author": "Panzhong Lu; Xin Zhang; Meishan Zhang; Min Zhang", + "authorids": "/p/panzhong-lu/; /x/xin-zhang/; /m/meishan-zhang/; /m/min-zhang/", + "bibtex": "@inproceedings{lu-etal-2022-extending,\n title = \"Extending Phrase Grounding with Pronouns in Visual Dialogues\",\n author = \"Lu, Panzhong and\n Zhang, Xin and\n Zhang, Meishan and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.518/\",\n doi = \"10.18653/v1/2022.emnlp-main.518\",\n pages = \"7614--7625\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.518.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.518/", + "pdf_size": 2612147, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12586821159649821649&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "School of New Media and Communication, Tianjin University; School of New Media and Communication, Tianjin University; Institute of Computing and Intelligence, Harbin Institute of Technology (Shenzhen); Institute of Computing and 
Intelligence, Harbin Institute of Technology (Shenzhen)", + "aff_domain": "tju.edu.cn;tju.edu.cn;hit.edu.cn;hit.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;hit.edu.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "Tianjin University;Harbin Institute of Technology", + "aff_unique_dep": "School of New Media and Communication;Institute of Computing and Intelligence", + "aff_unique_url": "http://www.tju.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "Tianjin University;HIT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.99", + "title": "Extracted BERT Model Leaks More Information than You Think!", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The collection and availability of big data, combined with advances in pre-trained models (e.g. BERT), have revolutionized the predictive performance of natural language processing tasks. This allows corporations to provide machine learning as a service (MLaaS) by encapsulating fine-tuned BERT-based models as APIs. Due to significant commercial interest, there has been a surge of attempts to steal remote services via model extraction. Although previous works have made progress in defending against model extraction attacks, there has been little discussion on their performance in preventing privacy leakage. This work bridges this gap by launching an attribute inference attack against the extracted BERT model. 
Our extensive experiments reveal that model extraction can cause severe privacy leakage even when victim models are facilitated with state-of-the-art defensive strategies.", + "author": "Xuanli He; Lingjuan Lyu; Chen Chen; Qiongkai Xu", + "authorids": "/x/xuanli-he/; /l/lingjuan-lyu/; /c/chen-chen/; /q/qiongkai-xu/", + "bibtex": "@inproceedings{he-etal-2022-extracted,\n title = \"Extracted {BERT} Model Leaks More Information than You Think!\",\n author = \"He, Xuanli and\n Lyu, Lingjuan and\n Chen, Chen and\n Xu, Qiongkai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.99/\",\n doi = \"10.18653/v1/2022.emnlp-main.99\",\n pages = \"1530--1537\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.99.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.99/", + "pdf_size": 317228, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12973448230352976839&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University College London; Zhejiang University; Sony AI; The University of Melbourne", + "aff_domain": "gmail.com;zju.edu.cn;sony.com;unimelb.edu.au", + "email": "gmail.com;zju.edu.cn;sony.com;unimelb.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University College London;Zhejiang University;Sony;University of Melbourne", + "aff_unique_dep": ";;Sony AI;", + "aff_unique_url": "https://www.ucl.ac.uk;https://www.zju.edu.cn;https://www.sony.com;https://www.unimelb.edu.au", + "aff_unique_abbr": "UCL;ZJU;Sony AI;UniMelb", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3", + 
"aff_country_unique": "United Kingdom;China;Japan;Australia" + }, + { + "id": "2022.findings-emnlp.85", + "title": "Extracting Trigger-sharing Events via an Event Matrix", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A growing interest emerges in event extraction which aims to extract multiple events with triggers and arguments. Previous methods mitigate the problem of multiple events extraction by predicting the arguments conditioned on the event trigger and event type, assuming that these arguments belong to a single event. However, the assumption is invalid in general as there may be multiple events. Therefore, we present a unified framework called MatEE for trigger-sharing events extraction. It resolves the kernel bottleneck by effectively modeling the relations between arguments by an event matrix, where trigger-sharing events are represented by multiple cliques. We verify the proposed method on 3 widely-used benchmark datasets of event extraction. The experimental results show that it beats all the advanced competitors, significantly improving the state-of-the-art performances in event extraction.", + "author": "Jun Xu; Weidi Xu; Mengshu Sun; Taifeng Wang; Wei Chu", + "authorids": "/j/jun-xu/; /w/weidi-xu/; /m/mengshu-sun/; /t/taifeng-wang/; /w/wei-chu/", + "bibtex": "@inproceedings{xu-etal-2022-extracting,\n title = \"Extracting Trigger-sharing Events via an Event Matrix\",\n author = \"Xu, Jun and\n Xu, Weidi and\n Sun, Mengshu and\n Wang, Taifeng and\n Chu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.85/\",\n doi = \"10.18653/v1/2022.findings-emnlp.85\",\n pages = \"1189--1201\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.85.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.85/", + "pdf_size": 925699, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1175831084449625638&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China", + "aff_domain": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "email": "antgroup.com;antgroup.com;antgroup.com;antgroup.com;antgroup.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Ant Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.antgroup.com", + "aff_unique_abbr": "Ant Group", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Hangzhou", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.134", + "title": "Extractive Summarization of Legal Decisions using Multi-task Learning and Maximal Marginal Relevance", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Summarizing legal decisions requires the expertise of law practitioners, which is both time- and cost-intensive. This paper presents techniques for extractive summarization of legal decisions in a low-resource setting using limited expert annotated data. We test a set of models that locate relevant content using a sequential model and tackle redundancy by leveraging maximal marginal relevance to compose summaries. We also demonstrate an implicit approach to help train our proposed models generate more informative summaries. Our multi-task learning model variant leverages rhetorical role identification as an auxiliary task to further improve the summarizer. 
We perform extensive experiments on datasets containing legal decisions from the US Board of Veterans\u2019 Appeals and conduct quantitative and expert-ranked evaluations of our models. Our results show that the proposed approaches can achieve ROUGE scores vis-\u00e0-vis expert extracted summaries that match those achieved by inter-annotator comparison.", + "author": "Abhishek Agarwal; Shanshan Xu; Matthias Grabmair", + "authorids": "/a/abhishek-agarwal/; /s/shanshan-xu/; /m/matthias-grabmair/", + "bibtex": "@inproceedings{agarwal-etal-2022-extractive,\n title = \"Extractive Summarization of Legal Decisions using Multi-task Learning and Maximal Marginal Relevance\",\n author = \"Agarwal, Abhishek and\n Xu, Shanshan and\n Grabmair, Matthias\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.134/\",\n doi = \"10.18653/v1/2022.findings-emnlp.134\",\n pages = \"1857--1872\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.134.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.134/", + "pdf_size": 902796, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13250740929274087956&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Technical University of Munich, Germany; Technical University of Munich, Germany; Technical University of Munich, Germany", + "aff_domain": "tum.de;tum.de;tum.de", + "email": "tum.de;tum.de;tum.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technical University of Munich", + "aff_unique_dep": "", + "aff_unique_url": "https://www.tum.de", + "aff_unique_abbr": "TUM", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.222", + "title": "FCGCL: Fine- and Coarse-Granularity Contrastive Learning for Speech Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "It is notoriously difficult to implement end-to-end speech translation (E2E-ST) model because of the task complexity and data scarcity. Existing techniques often attempt to carry out implicit knowledge transfer from machine translation (MT) to ST model by imposing various constraints. However, in this transfer scenario, a significant problem is that the performance of the MT will drop significantly and the final transfer effect is also restricted. In this article, we recommend Fine and Coarse Granularity Contrastive Learning (FCGCL), which conduct explicit knowledge transfer from MT to ST model. Specially, we ensure through multi granularity contrastive learning that inputs with similar semantic between different modalities are encoded closely in the shared semantic space while inputs with different semantics are kept apart. 
Experiments on the MuST-C datasets on all 8 languages and further analysis show that our method can effectively improve the E2E-ST performance and achieves an average BLEU of 29.0.", + "author": "Hao Zhang; Nianwen Si; Yaqi Chen; Zhen Li; Tong Niu; Xukui Yang; Dan Qu", + "authorids": "/h/hao-zhang/; /n/nianwen-si/; /y/yaqi-chen/; /z/zhen-li/; /t/tong-niu/; /x/xukui-yang/; /d/dan-qu/", + "bibtex": "@inproceedings{zhang-etal-2022-fcgcl,\n title = \"{FCGCL}: Fine- and Coarse-Granularity Contrastive Learning for Speech Translation\",\n author = \"Zhang, Hao and\n Si, Nianwen and\n Chen, Yaqi and\n Li, Zhen and\n Niu, Tong and\n Yang, Xukui and\n Qu, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.222/\",\n doi = \"10.18653/v1/2022.findings-emnlp.222\",\n pages = \"3048--3059\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.222.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.222/", + "pdf_size": 646302, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=712103511675999462&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China; University of Information Engineering, Zhengzhou, China", + "aff_domain": "163.com;163.com;163.com;163.com;sina.com;sina.com;sina.com", + "email": "163.com;163.com;163.com;163.com;sina.com;sina.com;sina.com", + "github": 
"https://github.com/zhhao/fcgcl", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "University of Information Engineering", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Zhengzhou", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.137", + "title": "FCGEC: Fine-Grained Corpus for Chinese Grammatical Error Correction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Grammatical Error Correction (GEC) has been broadly applied in automatic correction and proofreading system recently. However, it is still immature in Chinese GEC due to limited high-quality data from native speakers in terms of category and scale. In this paper, we present FCGEC, a fine-grained corpus to detect, identify and correct the grammatical errors. FCGEC is a human-annotated corpus with multiple references, consisting of 41,340 sentences collected mainly from multi-choice questions in public school Chinese examinations. Furthermore, we propose a Switch-Tagger-Generator (STG) baseline model to correct the grammatical errors in low-resource settings. Compared to other GEC benchmark models, experimental results illustrate that STG outperforms them on our FCGEC. 
However, there exists a significant gap between benchmark models and humans that encourages future models to bridge it.", + "author": "Lvxiaowei Xu; Jianwang Wu; Jiawei Peng; Jiayu Fu; Ming Cai", + "authorids": "/l/lvxiaowei-xu/; /j/jianwang-wu/; /j/jiawei-peng/; /j/jiayu-fu/; /m/ming-cai/", + "bibtex": "@inproceedings{xu-etal-2022-fcgec,\n title = \"{FCGEC}: Fine-Grained Corpus for {C}hinese Grammatical Error Correction\",\n author = \"Xu, Lvxiaowei and\n Wu, Jianwang and\n Peng, Jiawei and\n Fu, Jiayu and\n Cai, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.137/\",\n doi = \"10.18653/v1/2022.findings-emnlp.137\",\n pages = \"1900--1918\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.137.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.137/", + "pdf_size": 1576109, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6165856489500342250&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science and Technology, Zhejiang University; Department of Computer Science and Technology, Zhejiang University; Department of Computer Science and Technology, Zhejiang University; Base Station Platform Software Development Dept, Huawei Co., Ltd.; Department of Computer Science and Technology, Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;gmail.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;gmail.com;zju.edu.cn", + "github": "https://github.com/xlxwalex/FCGEC", + "project": "https://codalab.lisn.upsaclay.fr/competitions/8020", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Zhejiang University;Huawei 
Co., Ltd.", + "aff_unique_dep": "Department of Computer Science and Technology;Base Station Platform Software Development Dept", + "aff_unique_url": "http://www.zju.edu.cn;https://www.huawei.com", + "aff_unique_abbr": "ZJU;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.751", + "title": "FETA: A Benchmark for Few-Sample Task Transfer in Open-Domain Dialogue", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Task transfer, transferring knowledge contained in related tasks, holds the promise of reducing the quantity of labeled data required to fine-tune language models. Dialogue understanding encompasses many diverse tasks, yet task transfer has not been thoroughly studied in conversational AI. This work explores conversational task transfer by introducing FETA: a benchmark for FEw-sample TAsk transfer in open-domain dialogue.FETA contains two underlying sets of conversations upon which there are 10 and 7 tasks annotated, enabling the study of intra-dataset task transfer; task transfer without domain adaptation. 
We utilize three popular language models and three learning algorithms to analyze the transferability between 132 source-target task pairs and create a baseline for future work.We run experiments in the single- and multi-source settings and report valuable findings, e.g., most performance trends are model-specific, and span extraction and multiple-choice tasks benefit the most from task transfer.In addition to task transfer, FETA can be a valuable resource for future research into the efficiency and generalizability of pre-training datasets and model architectures, as well as for learning settings such as continual and multitask learning.", + "author": "Alon Albalak; Yi-Lin Tuan; Pegah Jandaghi; Connor Pryor; Luke Yoffe; Deepak Ramachandran; Lise Getoor; Jay Pujara; William Yang Wang", + "authorids": "/a/alon-albalak/; /y/yi-lin-tuan/; /p/pegah-jandaghi/; /c/connor-pryor/; /l/luke-yoffe/; /d/deepak-ramachandran/; /l/lise-getoor/; /j/jay-pujara/; /w/william-yang-wang/", + "bibtex": "@inproceedings{albalak-etal-2022-feta,\n title = \"{FETA}: A Benchmark for Few-Sample Task Transfer in Open-Domain Dialogue\",\n author = \"Albalak, Alon and\n Tuan, Yi-Lin and\n Jandaghi, Pegah and\n Pryor, Connor and\n Yoffe, Luke and\n Ramachandran, Deepak and\n Getoor, Lise and\n Pujara, Jay and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.751/\",\n doi = \"10.18653/v1/2022.emnlp-main.751\",\n pages = \"10936--10953\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.751.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.751/", + "pdf_size": 4777884, + "gs_citation": 17, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=16632781195544805953&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of California, Santa Barbara; University of California, Santa Barbara; University of Southern California; University of California, Santa Cruz; University of California, Santa Barbara; Google Research; University of California, Santa Cruz; University of Southern California; University of California, Santa Barbara", + "aff_domain": "ucsb.edu; ; ; ; ; ; ; ; ", + "email": "ucsb.edu; ; ; ; ; ; ; ; ", + "github": "", + "project": "alon-albalak.github.io/feta-website", + "author_num": 9, + "aff_unique_index": "0;0;1;2;0;3;2;1;0", + "aff_unique_norm": "University of California, Santa Barbara;University of Southern California;University of California, Santa Cruz;Google", + "aff_unique_dep": ";;;Google Research", + "aff_unique_url": "https://www.ucsb.edu;https://www.usc.edu;https://www.ucsc.edu;https://research.google", + "aff_unique_abbr": "UCSB;USC;UCSC;Google Research", + "aff_campus_unique_index": "0;0;1;2;0;3;2;1;0", + "aff_campus_unique": "Santa Barbara;Los Angeles;Santa Cruz;Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.481", + "title": "FLUTE: Figurative Language Understanding through Textual Explanations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Figurative language understanding has been recently framed as a recognizing textual entailment (RTE) task (a.k.a. natural language inference (NLI)). However, similar to classical RTE/NLI datasets they suffer from spurious correlations and annotation artifacts. To tackle this problem, work on NLI has built explanation-based datasets such as eSNLI, allowing us to probe whether language models are right for the right reasons. Yet no such data exists for figurative language, making it harder to assess genuine understanding of such expressions. 
To address this issue, we release FLUTE, a dataset of 9,000 figurative NLI instances with explanations, spanning four categories: Sarcasm, Simile, Metaphor, and Idioms. We collect the data through a Human-AI collaboration framework based on GPT-3, crowd workers, and expert annotators. We show how utilizing GPT-3 in conjunction with human annotators (novices and experts) can aid in scaling up the creation of datasets even for such complex linguistic phenomena as figurative language. The baseline performance of the T5 model fine-tuned on FLUTE shows that our dataset can bring us a step closer to developing models that understand figurative language through textual explanations.", + "author": "Tuhin Chakrabarty; Arkadiy Saakyan; Debanjan Ghosh; Smaranda Muresan", + "authorids": "/t/tuhin-chakrabarty/; /a/arkadiy-saakyan/; /d/debanjan-ghosh/; /s/smaranda-muresan/", + "bibtex": "@inproceedings{chakrabarty-etal-2022-flute,\n title = \"{FLUTE}: Figurative Language Understanding through Textual Explanations\",\n author = \"Chakrabarty, Tuhin and\n Saakyan, Arkadiy and\n Ghosh, Debanjan and\n Muresan, Smaranda\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.481/\",\n doi = \"10.18653/v1/2022.emnlp-main.481\",\n pages = \"7139--7159\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.481.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.481/", + "pdf_size": 931314, + "gs_citation": 75, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16328693071629091221&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Columbia University; Department of Computer Science, Columbia University; 
Educational Testing Service; Department of Computer Science, Columbia University", + "aff_domain": "cs.columbia.edu;columbia.edu;ets.org;cs.columbia.edu", + "email": "cs.columbia.edu;columbia.edu;ets.org;cs.columbia.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Columbia University;Educational Testing Service", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.columbia.edu;https://www.ets.org", + "aff_unique_abbr": "Columbia;ETS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.511", + "title": "FPT: Improving Prompt Tuning Efficiency via Progressive Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, prompt tuning (PT) has gained increasing attention as a parameter-efficient way of tuning pre-trained language models (PLMs). Despite extensively reducing the number of tunable parameters and achieving satisfying performance, PT is training-inefficient due to its slow convergence. To improve PT\u2019s training efficiency, we first make some novel observations about the prompt transferability of \u201cpartial PLMs\u201d, which are defined by compressing a PLM in depth or width. We observe that the soft prompts learned by different partial PLMs of various sizes are similar in the parameter space, implying that these soft prompts could potentially be transferred among partial PLMs. Inspired by these observations, we propose Fast Prompt Tuning (FPT), which starts by conducting PT using a small-scale partial PLM, and then progressively expands its depth and width until the full-model size. After each expansion, we recycle the previously learned soft prompts as initialization for the enlarged partial PLM and then proceed PT. 
We demonstrate the feasibility of FPT on 5 tasks and show that FPT could save over 30% training computations while achieving comparable performance. The codes are publicly available at https://github.com/thunlp/FastPromptTuning.", + "author": "Yufei Huang; Yujia Qin; Huadong Wang; Yichun Yin; Maosong Sun; Zhiyuan Liu; Qun Liu", + "authorids": "/y/yufei-huang/; /y/yujia-qin/; /h/huadong-wang/; /y/yichun-yin/; /m/maosong-sun/; /z/zhiyuan-liu/; /q/qun-liu/", + "bibtex": "https://aclanthology.org/2022.findings-emnlp.511.bib", + "pdf": "https://aclanthology.org/2022.findings-emnlp.511.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.511/", + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15564782424571397722&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.findings-emnlp.267", + "title": "FRSUM: Towards Faithful Abstractive Summarization via Enhancing Factual Robustness", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite being able to generate fluent and grammatical text, current Seq2Seq summarization models still suffering from the unfaithful generation problem.In this paper, we study the faithfulness of existing systems from a new perspective of factual robustness which is the ability to correctly generate factual information over adversarial unfaithful information.We first measure a model\u2019sfactual robustness by its success rate to defend against adversarial attacks when generating factual information.The factual robustness analysis on a wide range of current systems shows its good consistency with human judgments on faithfulness.Inspired by these findings, we propose to improve the faithfulness of a model by enhancing its factual robustness.Specifically, we propose a novel training strategy, namely FRSUM, which teaches the model to 
defend against both explicit adversarial samples and implicit factual adversarial perturbations.Extensive automatic and human evaluation results show that FRSUM consistently improves the faithfulness of various Seq2Seq models, such as T5, BART.", + "author": "Wenhao Wu; Wei Li; Jiachen Liu; Xinyan Xiao; Ziqiang Cao; Sujian Li; Hua Wu", + "authorids": "/w/wenhao-wu/; /w/wei-li/; /j/jiachen-liu/; /x/xinyan-xiao/; /z/ziqiang-cao/; /s/sujian-li/; /h/hua-wu/", + "bibtex": "@inproceedings{wu-etal-2022-frsum,\n title = \"{FRSUM}: Towards Faithful Abstractive Summarization via Enhancing Factual Robustness\",\n author = \"Wu, Wenhao and\n Li, Wei and\n Liu, Jiachen and\n Xiao, Xinyan and\n Cao, Ziqiang and\n Li, Sujian and\n Wu, Hua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.267/\",\n doi = \"10.18653/v1/2022.findings-emnlp.267\",\n pages = \"3640--3654\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.267.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.267/", + "pdf_size": 530957, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3114420116198973295&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Key Laboratory of Computational Linguistics, MOE, Peking University+Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Institute of Artificial Intelligence, Soochow University, China; Key Laboratory of Computational Linguistics, MOE, Peking University; Baidu Inc., Beijing, China", + "aff_domain": "pku.edu.cn;baidu.com;baidu.com;baidu.com;suda.edu.cn;pku.edu.cn;baidu.com", + "email": 
"pku.edu.cn;baidu.com;baidu.com;baidu.com;suda.edu.cn;pku.edu.cn;baidu.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;1;1;2;0;1", + "aff_unique_norm": "Peking University;Baidu Inc.;Soochow University", + "aff_unique_dep": "Key Laboratory of Computational Linguistics;;Institute of Artificial Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;https://www.baidu.com;https://www.soochow.edu.cn", + "aff_unique_abbr": "PKU;Baidu;", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.716", + "title": "FaD-VLP: Fashion Vision-and-Language Pre-training towards Unified Retrieval and Captioning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal tasks in the fashion domain have significant potential for e-commerce, but involve challenging vision-and-language learning problems\u2014e.g., retrieving a fashion item given a reference image plus text feedback from a user. Prior works on multimodal fashion tasks have either been limited by the data in individual benchmarks, or have leveraged generic vision-and-language pre-training but have not taken advantage of the characteristics of fashion data. Additionally, these works have mainly been restricted to multimodal understanding tasks. To address these gaps, we make two key contributions. First, we propose a novel fashion-specific pre-training framework based on weakly-supervised triplets constructed from fashion image-text pairs. We show the triplet-based tasks are an effective addition to standard multimodal pre-training tasks. Second, we propose a flexible decoder-based model architecture capable of both fashion retrieval and captioning tasks. 
Together, our model design and pre-training approach are competitive on a diverse set of fashion tasks, including cross-modal retrieval, image retrieval with text feedback, image captioning, relative image captioning, and multimodal categorization.", + "author": "Suvir Mirchandani; Licheng Yu; Mengjiao Wang; Animesh Sinha; Wenwen Jiang; Tao Xiang; Ning Zhang", + "authorids": "/s/suvir-mirchandani/; /l/licheng-yu/; /m/mengjiao-wang/; /a/animesh-sinha/; /w/wenwen-jiang/; /t/tao-xiang/; /n/ning-zhang/", + "bibtex": "@inproceedings{mirchandani-etal-2022-fad,\n title = \"{F}a{D}-{VLP}: Fashion Vision-and-Language Pre-training towards Unified Retrieval and Captioning\",\n author = \"Mirchandani, Suvir and\n Yu, Licheng and\n Wang, Mengjiao and\n Sinha, Animesh and\n Jiang, Wenwen and\n Xiang, Tao and\n Zhang, Ning\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.716/\",\n doi = \"10.18653/v1/2022.emnlp-main.716\",\n pages = \"10484--10497\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.716.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.716/", + "pdf_size": 2205260, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2672649031574433680&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Stanford University; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI / University of Surrey; Meta AI", + "aff_domain": "cs.stanford.edu;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "email": "cs.stanford.edu;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;1;2;1", + "aff_unique_norm": "Stanford 
University;Meta Platforms, Inc.;University of Surrey", + "aff_unique_dep": ";Meta AI;Meta AI", + "aff_unique_url": "https://www.stanford.edu;https://meta.com;https://www.surrey.ac.uk", + "aff_unique_abbr": "Stanford;Meta;UoS", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0;0;0;1;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "2022.emnlp-main.219", + "title": "Face-Sensitive Image-to-Emotional-Text Cross-modal Translation for Multimodal Aspect-based Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Aspect-level multimodal sentiment analysis, which aims to identify the sentiment of the target aspect from multimodal data, recently has attracted extensive attention in the community of multimedia and natural language processing. Despite the recent success in textual aspect-based sentiment analysis, existing models mainly focused on utilizing the object-level semantic information in the image but ignore explicitly using the visual emotional cues, especially the facial emotions. How to distill visual emotional cues and align them with the textual content remains a key challenge to solve the problem. In this work, we introduce a face-sensitive image-to-emotional-text translation (FITE) method, which focuses on capturing visual sentiment cues through facial expressions and selectively matching and fusing with the target aspect in textual modality. To the best of our knowledge, we are the first that explicitly utilize the emotional information from images in the multimodal aspect-based sentiment analysis task. Experiment results show that our method achieves state-of-the-art results on the Twitter-2015 and Twitter-2017 datasets. 
The improvement demonstrates the superiority of our model in capturing aspect-level sentiment in multimodal data with facial expressions.", + "author": "Hao Yang; Yanyan Zhao; Bing Qin", + "authorids": "/h/hao-yang/; /y/yanyan-zhao/; /b/bing-qin/", + "bibtex": "@inproceedings{yang-etal-2022-face,\n title = \"Face-Sensitive Image-to-Emotional-Text Cross-modal Translation for Multimodal Aspect-based Sentiment Analysis\",\n author = \"Yang, Hao and\n Zhao, Yanyan and\n Qin, Bing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.219/\",\n doi = \"10.18653/v1/2022.emnlp-main.219\",\n pages = \"3324--3335\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.219.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.219/", + "pdf_size": 6410440, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6051154360268243194&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "https://github.com/yhit98/FITE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Harbin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.734", + "title": "Facilitating Contrastive Learning of Discourse Relational Senses by 
Exploiting the Hierarchy of Sense Relations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Implicit discourse relation recognition is a challenging task that involves identifying the sense or senses that hold between two adjacent spans of text, in the absense of an explicit connective between them. In both PDTB-2 (prasad et al., 2008) and PDTB-3 (Webber et al., 2019), discourse relational senses are organized into a three-level hierarchy ranging from four broad top-level senses, to more specific senses below them. Most previous work on implicitf discourse relation recognition have used the sense hierarchy simply to indicate what sense labels were available. Here we do more \u2014 incorporating the sense hierarchy into the recognition process itself and using it to select the negative examples used in contrastive learning. With no additional effort, the approach achieves state-of-the-art performance on the task. Our code is released in https://github.com/wanqiulong0923/Contrastive_IDRR.", + "author": "Wanqiu Long; Bonnie Webber", + "authorids": "/w/wanqiu-long/; /b/bonnie-webber/", + "bibtex": "@inproceedings{long-webber-2022-facilitating,\n title = \"Facilitating Contrastive Learning of Discourse Relational Senses by Exploiting the Hierarchy of Sense Relations\",\n author = \"Long, Wanqiu and\n Webber, Bonnie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.734/\",\n doi = \"10.18653/v1/2022.emnlp-main.734\",\n pages = \"10704--10716\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.734.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.734/", + "pdf_size": 2334139, + "gs_citation": 27, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=17586925999116812864&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Edinburgh, Edinburgh, UK; University of Edinburgh, Edinburgh, UK", + "aff_domain": "ed.ac.uk;ed.ac.uk", + "email": "ed.ac.uk;ed.ac.uk", + "github": "https://github.com/wanqiulong0923/Contrastive_IDRR", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Edinburgh", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ed.ac.uk", + "aff_unique_abbr": "Edinburgh", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Edinburgh", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-industry.46", + "title": "Fact Checking Machine Generated Text with Dependency Trees", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Factual and logical errors made by Natural Language Generation (NLG) systems limit their applicability in many settings. We study this problem in a conversational search and recommendation setting, and observe that we can often make two simplifying assumptions in this domain: (i) there exists a body of structured knowledge we can use for verifying factuality of generated text; and (ii) the text to be factually assessed typically has a well-defined structure and style. Grounded in these assumptions, we propose a fast, unsupervised and explainable technique, DepChecker, that assesses factuality of input text based on rules derived from structured knowledge patterns and dependency relations with respect to the input text. 
We show that DepChecker outperforms state-of-the-art, general purpose fact-checking techniques in this special, but important case.", + "author": "Alex Estes; Nikhita Vedula; Marcus Collins; Matt Cecil; Oleg Rokhlenko", + "authorids": "/a/alex-estes/; /n/nikhita-vedula/; /m/marcus-collins/; /m/matt-cecil/; /o/oleg-rokhlenko/", + "bibtex": "@inproceedings{estes-etal-2022-fact,\n title = \"Fact Checking Machine Generated Text with Dependency Trees\",\n author = \"Estes, Alex and\n Vedula, Nikhita and\n Collins, Marcus and\n Cecil, Matt and\n Rokhlenko, Oleg\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.46/\",\n doi = \"10.18653/v1/2022.emnlp-industry.46\",\n pages = \"458--466\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.46.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.46/", + "pdf_size": 300073, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=495990418702096090&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Balto, Seattle, WA + Amazon, Seattle, WA; Amazon, Seattle, WA; Amazon, Seattle, WA; Amazon, Seattle, WA; Amazon, Seattle, WA", + "aff_domain": "gmail.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "gmail.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;1", + "aff_unique_norm": "Balto;Amazon.com, Inc.", + "aff_unique_dep": ";", + "aff_unique_url": ";https://www.amazon.com", + "aff_unique_abbr": ";Amazon", + "aff_campus_unique_index": "0+0;0;0;0;0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "United States" + }, + 
{ + "id": "2022.emnlp-main.426", + "title": "Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We argue that disentangling content selection from the budget used to cover salient content improves the performance and applicability of abstractive summarizers. Our method, FactorSum, does this disentanglement by factorizing summarization into two steps through an energy function: (1) generation of abstractive summary views covering salient information in subsets of the input document (document views); (2) combination of these views into a final summary, following a budget and content guidance. This guidance may come from different sources, including from an advisor model such as BART or BigBird, or in oracle mode \u2013 from the reference. This factorization achieves significantly higher ROUGE scores on multiple benchmarks for long document summarization, namely PubMed, arXiv, and GovReport. Most notably, our model is effective for domain adaptation. When trained only on PubMed samples, it achieves a 46.29 ROUGE-1 score on arXiv, outperforming PEGASUS trained in domain by a large margin. Our experimental results indicate that the performance gains are due to more flexible budget adaptation and processing of shorter contexts provided by partial document views.", + "author": "Marcio Fonseca; Yftah Ziser; Shay B. 
Cohen", + "authorids": "/m/marcio-fonseca/; /y/yftah-ziser/; /s/shay-b-cohen/", + "bibtex": "@inproceedings{fonseca-etal-2022-factorizing,\n title = \"Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents\",\n author = \"Fonseca, Marcio and\n Ziser, Yftah and\n Cohen, Shay B.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.426/\",\n doi = \"10.18653/v1/2022.emnlp-main.426\",\n pages = \"6341--6364\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.426.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.426/", + "pdf_size": 482504, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16358218324427285635&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh; Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh; Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh", + "aff_domain": "ed.ac.uk;ed.ac.uk;inf.ed.ac.uk", + "email": "ed.ac.uk;ed.ac.uk;inf.ed.ac.uk", + "github": "https://github.com/thefonseca/factorsum", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Edinburgh", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.ed.ac.uk", + "aff_unique_abbr": "Edinburgh", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Edinburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.480", + "title": "Factual Accuracy is 
not Enough: Planning Consistent Description Order for Radiology Report Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Radiology report generation systems have the potential to reduce the workload of radiologists by automatically describing the findings in medical images.To broaden the application of the report generation system, the system should generate reports that are not only factually accurate but also chronologically consistent, describing images that are presented in time order, that is, the correct order.We employ a planning-based radiology report generation system that generates the overall structure of reports as \u201cplans\u2019\u201d prior to generating reports that are accurate and consistent in order.Additionally, we propose a novel reinforcement learning and inference method, Coordinated Planning (CoPlan), that includes a content planner and a text generator to train and infer in a coordinated manner to alleviate the cascading of errors that are often inherent in planning-based models.We conducted experiments with single-phase diagnostic reports in which the factual accuracy is critical and multi-phase diagnostic reports in which the description order is critical.Our proposed CoPlan improves the content order score by 5.1 pt in time series critical scenarios and the clinical factual accuracy F-score by 9.1 pt in time series irrelevant scenarios, compared those of the baseline models without CoPlan.", + "author": "Toru Nishino; Yasuhide Miura; Tomoki Taniguchi; Tomoko Ohkuma; Yuki Suzuki; Shoji Kido; Noriyuki Tomiyama", + "authorids": "/t/toru-nishino/; /y/yasuhide-miura/; /t/tomoki-taniguchi/; /t/tomoko-ohkuma/; /y/yuki-suzuki/; /s/shoji-kido/; /n/noriyuki-tomiyama/", + "bibtex": "@inproceedings{nishino-etal-2022-factual,\n title = \"Factual Accuracy is not Enough: Planning Consistent Description Order for Radiology Report Generation\",\n author = \"Nishino, Toru and\n Miura, Yasuhide and\n Taniguchi, Tomoki 
and\n Ohkuma, Tomoko and\n Suzuki, Yuki and\n Kido, Shoji and\n Tomiyama, Noriyuki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.480/\",\n doi = \"10.18653/v1/2022.emnlp-main.480\",\n pages = \"7123--7138\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.480.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.480/", + "pdf_size": 433134, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8759934712353106364&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Fujifilm Corporation; Fujifilm Corporation; Fujifilm Corporation; Fujifilm Corporation; Osaka University Graduate School of Medicine; Osaka University Graduate School of Medicine; Osaka University Graduate School of Medicine", + "aff_domain": "fujifilm.com; ; ; ; ; ; ", + "email": "fujifilm.com; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;1;1", + "aff_unique_norm": "Fujifilm Corporation;Osaka University", + "aff_unique_dep": ";Graduate School of Medicine", + "aff_unique_url": "https://www.fujifilm.com;https://www.osaka-u.ac.jp", + "aff_unique_abbr": "Fujifilm;OU", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Osaka", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.findings-emnlp.514", + "title": "Fair NLP Models with Differentially Private Text Encoders", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Encoded text representations often capture sensitive attributes about individuals (e.g., race or gender), which raise privacy concerns and can make downstream models unfair to 
certain groups. In this work, we propose FEDERATE, an approach that combines ideas from differential privacy and adversarial training to learn private text representations which also induces fairer models. We empirically evaluate the trade-off between the privacy of the representations and the fairness and accuracy of the downstream model on four NLP datasets. Our results show that FEDERATE consistently improves upon previous methods, and thus suggest that privacy and fairness can positively reinforce each other.", + "author": "Gaurav Maheshwari; Pascal Denis; Mikaela Keller; Aur\u00e9lien Bellet", + "authorids": "/g/gaurav-maheshwari/; /p/pascal-denis/; /m/mikaela-keller/; /a/aurelien-bellet/", + "bibtex": "@inproceedings{maheshwari-etal-2022-fair,\n title = \"Fair {NLP} Models with Differentially Private Text Encoders\",\n author = \"Maheshwari, Gaurav and\n Denis, Pascal and\n Keller, Mikaela and\n Bellet, Aur{\\'e}lien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.514/\",\n doi = \"10.18653/v1/2022.findings-emnlp.514\",\n pages = \"6913--6930\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.514.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.514/", + "pdf_size": 546461, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16541952350384744515&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Univ. Lille, Inria, CNRS, Centrale Lille, UMR 9189 - CRIStAL, F-59000 Lille, France; Univ. Lille, Inria, CNRS, Centrale Lille, UMR 9189 - CRIStAL, F-59000 Lille, France; Univ. Lille, Inria, CNRS, Centrale Lille, UMR 9189 - CRIStAL, F-59000 Lille, France; Univ. 
Lille, Inria, CNRS, Centrale Lille, UMR 9189 - CRIStAL, F-59000 Lille, France", + "aff_domain": "inria.fr;inria.fr;inria.fr;inria.fr", + "email": "inria.fr;inria.fr;inria.fr;inria.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Lille", + "aff_unique_dep": "UMR 9189 - CRIStAL", + "aff_unique_url": "https://www.univ-lille.fr", + "aff_unique_abbr": "Univ. Lille", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Lille", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.emnlp-main.743", + "title": "Faithful Knowledge Graph Explanations in Commonsense Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge graphs are commonly used as sources of information in commonsense question answering, and can also be used to express explanations for the model\u2019s answer choice. A common way of incorporating facts from the graph is to encode them separately from the question, and then combine the two representations to select an answer. In this paper, we argue that highly faithful graph-based explanations cannot be extracted from existing models of this type. Such explanations will not include reasoning done by the transformer encoding the question, so will be incomplete. We confirm this theory with a novel proxy measure for faithfulness and propose two architecture changes to address the problem. 
Our findings suggest a path forward for developing architectures for faithful graph-based explanations.", + "author": "Guy Aglionby; Simone Teufel", + "authorids": "/g/guy-aglionby/; /s/simone-teufel/", + "bibtex": "@inproceedings{aglionby-teufel-2022-faithful,\n title = \"Faithful Knowledge Graph Explanations in Commonsense Question Answering\",\n author = \"Aglionby, Guy and\n Teufel, Simone\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.743/\",\n doi = \"10.18653/v1/2022.emnlp-main.743\",\n pages = \"10811--10817\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.743.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.743/", + "pdf_size": 206084, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4953276345787839648&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 2, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.findings-emnlp.76", + "title": "Faithful to the Document or to the World? Mitigating Hallucinations via Entity-Linked Knowledge in Abstractive Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Existing abstractive summarization systems are hampered by content hallucinations in which models generate text that is not directly inferable from the source alone. Annotations from prior work have shown that some of these hallucinations, while being \u2018unfaithful\u2019 to the source, are nonetheless factual. 
Our analysis in this paper suggests that these factual hallucinations occur as a result of the prevalence of factual yet unfaithful entities in summarization datasets. We find that these entities are not aberrations, but instead examples of additional world knowledge being readily used to latently connect entities and concepts \u2013 in this case connecting entities in the source document to those in the target summary. In our analysis and experiments, we demonstrate that connecting entities to an external knowledge base can lend provenance to many of these unfaithful yet factual entities, and further, this knowledge can be used to improve the factuality of summaries without simply making them more extractive.", + "author": "Yue Dong; John Wieting; Pat Verga", + "authorids": "/y/yue-dong/; /j/john-wieting/; /p/pat-verga/", + "bibtex": "@inproceedings{dong-etal-2022-faithful,\n title = \"Faithful to the Document or to the World? Mitigating Hallucinations via Entity-Linked Knowledge in Abstractive Summarization\",\n author = \"Dong, Yue and\n Wieting, John and\n Verga, Pat\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.76/\",\n doi = \"10.18653/v1/2022.findings-emnlp.76\",\n pages = \"1067--1082\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.76.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.76/", + "pdf_size": 1140117, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9950583894458766394&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Mila / McGill University; Google Research; Google Research", + "aff_domain": "mail.mcgill.ca;google.com;google.com", + "email": 
"mail.mcgill.ca;google.com;google.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "McGill University;Google", + "aff_unique_dep": "Mila;Google Research", + "aff_unique_url": "https://www.mcgill.ca;https://research.google", + "aff_unique_abbr": "McGill;Google Research", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Montreal;Mountain View", + "aff_country_unique_index": "0;1;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-industry.41", + "title": "Fast Vocabulary Transfer for Language Model Compression", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Real-world business applications require a trade-off between language model performance and size. We propose a new method for model compression that relies on vocabulary transfer. We evaluate the method on various vertical domains and downstream tasks. Our results indicate that vocabulary transfer can be effectively used in combination with other compression techniques, yielding a significant reduction in model size and inference time while marginally compromising on performance.", + "author": "Leonidas Gee; Andrea Zugarini; Leonardo Rigutini; Paolo Torroni", + "authorids": "/l/leonidas-gee/; /a/andrea-zugarini/; /l/leonardo-rigutini/; /p/paolo-torroni/", + "bibtex": "@inproceedings{gee-etal-2022-fast,\n title = \"Fast Vocabulary Transfer for Language Model Compression\",\n author = \"Gee, Leonidas and\n Zugarini, Andrea and\n Rigutini, Leonardo and\n Torroni, Paolo\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.41/\",\n doi = \"10.18653/v1/2022.emnlp-industry.41\",\n pages = 
\"409--416\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.41.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.41/", + "pdf_size": 480342, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14089350525664918859&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Expert.ai, Italy; Expert.ai, Italy; Expert.ai, Italy+University of Siena; Department of Computer Science and Engineering, University of Bologna", + "aff_domain": "expert.ai;expert.ai;expert.ai;unibo.it", + "email": "expert.ai;expert.ai;expert.ai;unibo.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;2", + "aff_unique_norm": "Expert.ai;University of Siena;University of Bologna", + "aff_unique_dep": ";;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.expert.ai;https://www.unisi.it;https://www.unibo.it", + "aff_unique_abbr": "Expert.ai;UniSi;UNIBO", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0", + "aff_country_unique": "Italy" + }, + { + "id": "2022.emnlp-main.181", + "title": "Fast-R2D2: A Pretrained Recursive Neural Network based on Pruned CKY for Grammar Induction and Text Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Chart-based models have shown great potential in unsupervised grammar induction, running recursively and hierarchically, but requiring O(n\u00b3) time-complexity. The Recursive Transformer based on Differentiable Trees (R2D2) makes it possible to scale to large language model pretraining even with a complex tree encoder, by introducing a heuristic pruning method.However, its rule-based pruning process suffers from local optima and slow inference. In this paper, we propose a unified R2D2 method that overcomes these issues. We use a top-down unsupervised parser as a model-guided pruning method, which also enables parallel encoding during inference. 
Our parser casts parsing as a split point scoring task by first scoring all split points for a given sentence and then using the highest-scoring one to recursively split a span into two parts. The reverse order of the splits is considered as the order of pruning in the encoder. We optimize the unsupervised parser by minimizing the Kullback\u2013Leibler distance between tree probabilities from the parser and the R2D2 model.Our experiments show that our Fast-R2D2 significantly improves the grammar induction quality and achieves competitive results in downstream tasks.", + "author": "Xiang Hu; Haitao Mi; Liang Li; Gerard de Melo", + "authorids": "/x/xiang-hu/; /h/haitao-mi/; /l/liang-li/; /g/gerard-de-melo/", + "bibtex": "@inproceedings{hu-etal-2022-fast,\n title = \"Fast-{R}2{D}2: A Pretrained Recursive Neural Network based on Pruned {CKY} for Grammar Induction and Text Representation\",\n author = \"Hu, Xiang and\n Mi, Haitao and\n Li, Liang and\n de Melo, Gerard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.181/\",\n doi = \"10.18653/v1/2022.emnlp-main.181\",\n pages = \"2809--2821\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.181.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.181/", + "pdf_size": 605238, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5377150233156089138&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Ant Group\u2020; Ant Group\u2020+School of Cyber Science and Technology, Shandong University, China / Key Laboratory of Cryptologic Technology and Information Security of Ministry of Education, Shandong University / Quancheng Laboratory, 
China\u2021; School of Cyber Science and Technology, Shandong University, China / Key Laboratory of Cryptologic Technology and Information Security of Ministry of Education, Shandong University / Quancheng Laboratory, China\u2021; Hasso Plattner Institute / University of Potsdam\u00a7", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;sdu.edu.cn;demelo.org", + "email": "alibaba-inc.com;alibaba-inc.com;sdu.edu.cn;demelo.org", + "github": "https://github.com/alipay/StructuredLM_RTDT", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;1;2", + "aff_unique_norm": "Ant Group;Shandong University;Hasso Plattner Institute", + "aff_unique_dep": ";School of Cyber Science and Technology;", + "aff_unique_url": "https://www.antgroup.com;http://www.sdu.edu.cn;https://www.hpi.de", + "aff_unique_abbr": "Ant Group;SDU;HPI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;1", + "aff_country_unique": "China;Germany" + }, + { + "id": "2022.emnlp-main.313", + "title": "FastClass: A Time-Efficient Approach to Weakly-Supervised Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Weakly-supervised text classification aims to train a classifier using only class descriptions and unlabeled data. Recent research shows that keyword-driven methods can achieve state-of-the-art performance on various tasks. However, these methods not only rely on carefully-crafted class descriptions to obtain class-specific keywords but also require substantial amount of unlabeled data and takes a long time to train. This paper proposes FastClass, an efficient weakly-supervised classification approach. It uses dense text representation to retrieve class-relevant documents from external unlabeled corpus and selects an optimal subset to train a classifier. 
Compared to keyword-driven methods, our approach is less reliant on initial class descriptions as it no longer needs to expand each class description into a set of class-specific keywords.Experiments on a wide range of classification tasks show that the proposed approach frequently outperforms keyword-driven models in terms of classification accuracy and often enjoys orders-of-magnitude faster training speed.", + "author": "Tingyu Xia; Yue Wang; Yuan Tian; Yi Chang", + "authorids": "/t/tingyu-xia/; /y/yue-wang/; /y/yuan-tian/; /y/yi-chang/", + "bibtex": "@inproceedings{xia-etal-2022-fastclass,\n title = \"{F}ast{C}lass: A Time-Efficient Approach to Weakly-Supervised Text Classification\",\n author = \"Xia, Tingyu and\n Wang, Yue and\n Tian, Yuan and\n Chang, Yi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.313/\",\n doi = \"10.18653/v1/2022.emnlp-main.313\",\n pages = \"4746--4758\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.313.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.313/", + "pdf_size": 579978, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2647687673773584155&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future Science, Jilin University; School of Information and Library Science, University of North Carolina at Chapel Hill; School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future 
Science, Jilin University; School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future Science, Jilin University", + "aff_domain": "mails.jlu.edu.cn;unc.edu;jlu.edu.cn;jlu.edu.cn", + "email": "mails.jlu.edu.cn;unc.edu;jlu.edu.cn;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0+0;1;0+0+0;0+0+0", + "aff_unique_norm": "Jilin University;University of North Carolina at Chapel Hill", + "aff_unique_dep": "School of Artificial Intelligence;School of Information and Library Science", + "aff_unique_url": "http://www.jlu.edu.cn;https://www.unc.edu", + "aff_unique_abbr": "JLU;UNC Chapel Hill", + "aff_campus_unique_index": ";1;;", + "aff_campus_unique": ";Chapel Hill", + "aff_country_unique_index": "0+0+0;1;0+0+0;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.353", + "title": "Federated Continual Learning for Text Classification via Selective Inter-client Transfer", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this work, we combine the two paradigms: Federated Learning (FL) and Continual Learning (CL) for text classification task in cloud-edge continuum. The objective of Federated Continual Learning (FCL) is to improve deep learning models over life time at each client by (relevant and efficient) knowledge transfer without sharing data. Here, we address challenges in minimizing inter-client interference while knowledge sharing due to heterogeneous tasks across clients in FCL setup. In doing so, we propose a novel framework, Federated Selective Inter-client Transfer (FedSeIT) which selectively combines model parameters of foreign clients. To further maximize knowledge transfer, we assess domain overlap and select informative tasks from the sequence of historical tasks at each foreign client while preserving privacy. 
Evaluating against the baselines, we show improved performance, a gain of (average) 12.4% in text classification over a sequence of tasks using five datasets from diverse domains. To the best of our knowledge, this is the first work that applies FCL to NLP.", + "author": "Yatin Chaudhary; Pranav Rai; Matthias Schubert; Hinrich Sch\u00fctze; Pankaj Gupta", + "authorids": "/y/yatin-chaudhary/; /p/pranav-rai/; /m/matthias-schubert/; /h/hinrich-schutze/; /p/pankaj-gupta/", + "bibtex": "@inproceedings{chaudhary-etal-2022-federated,\n title = \"Federated Continual Learning for Text Classification via Selective Inter-client Transfer\",\n author = {Chaudhary, Yatin and\n Rai, Pranav and\n Schubert, Matthias and\n Sch{\\\"u}tze, Hinrich and\n Gupta, Pankaj},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.353/\",\n doi = \"10.18653/v1/2022.findings-emnlp.353\",\n pages = \"4789--4799\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.353.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.353/", + "pdf_size": 1896835, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5759345235864843534&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "DRIMCo GmbH, Munich, Germany+University of Munich (LMU), Munich, Germany; DRIMCo GmbH, Munich, Germany+University of Munich (LMU), Munich, Germany; University of Munich (LMU), Munich, Germany; University of Munich (LMU), Munich, Germany; DRIMCo GmbH, Munich, Germany", + "aff_domain": "drimco.net;drimco.net;drimco.net;drimco.net;drimco.net", + "email": "drimco.net;drimco.net;drimco.net;drimco.net;drimco.net", + "github": "", + "project": "", + "author_num": 5, + 
"aff_unique_index": "0+1;0+1;1;1;0", + "aff_unique_norm": "DRIMCo GmbH;University of Munich", + "aff_unique_dep": ";", + "aff_unique_url": ";https://www.lmu.de", + "aff_unique_abbr": ";LMU", + "aff_campus_unique_index": "0+0;0+0;0;0;0", + "aff_campus_unique": "Munich", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.430", + "title": "Federated Model Decomposition with Private Vocabulary for Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With the necessity of privacy protection, it becomes increasingly vital to train deep neural models in a federated learning manner for natural language processing (NLP) tasks. However, recent studies show eavesdroppers (i.e., dishonest servers) can still reconstruct the private input in federated learning (FL). Such a data reconstruction attack relies on the mappings between vocabulary and associated word embedding in NLP tasks, which are unfortunately less studied in current FL methods. In this paper, we propose a fedrated model decomposition method that protects the privacy of vocabularies, shorted as FEDEVOCAB. In FEDEVOCAB, each participant keeps the local embedding layer in the local device and detaches the local embedding parameters from federated aggregation. However, it is challenging to train an accurate NLP model when the private mappings are unknown and vary across participants in a cross-device FL setting. To address this problem, we further propose an adaptive updating technique to improve the performance of local models. 
Experimental results show that FEDEVOCAB maintains competitive performance and provides better privacy-preserving capacity compared to status quo methods.", + "author": "Zhuo Zhang; Xiangjing Hu; Lizhen Qu; Qifan Wang; Zenglin Xu", + "authorids": "/z/zhuo-zhang/; /x/xiangjing-hu/; /l/lizhen-qu/; /q/qifan-wang/; /z/zenglin-xu/", + "bibtex": "@inproceedings{zhang-etal-2022-federated,\n title = \"Federated Model Decomposition with Private Vocabulary for Text Classification\",\n author = \"Zhang, Zhuo and\n Hu, Xiangjing and\n Qu, Lizhen and\n Wang, Qifan and\n Xu, Zenglin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.430/\",\n doi = \"10.18653/v1/2022.emnlp-main.430\",\n pages = \"6413--6425\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.430.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.430/", + "pdf_size": 800365, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6589058893644663413&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Harbin Institute of Technology (Shenzhen), China+Peng Cheng Lab, Shenzhen, China; Harbin Institute of Technology (Shenzhen), China+Peng Cheng Lab, Shenzhen, China; Monash University, Melbourne, Australia; Meta AI, CA, USA; Harbin Institute of Technology (Shenzhen), China+Peng Cheng Lab, Shenzhen, China", + "aff_domain": "gmail.com;gmail.com;monash.edu;fb.com;hit.edu.cn", + "email": "gmail.com;gmail.com;monash.edu;fb.com;hit.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2;3;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Lab;Monash University;Meta AI", + "aff_unique_dep": 
";;;", + "aff_unique_url": "http://en.hhit.edu.cn/;;https://www.monash.edu;https://meta.ai", + "aff_unique_abbr": "HIT;;Monash;Meta AI", + "aff_campus_unique_index": "0+0;0+0;1;2;0+0", + "aff_campus_unique": "Shenzhen;Melbourne;CA", + "aff_country_unique_index": "0+0;0+0;1;2;0+0", + "aff_country_unique": "China;Australia;United States" + }, + { + "id": "2022.findings-emnlp.409", + "title": "Few-Shot (Dis)Agreement Identification in Online Discussions with Regularized and Augmented Meta-Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Online discussions are abundant with opinions towards a common topic, and identifying (dis)agreement between a pair of comments enables many opinion mining applications. Realizing the increasing needs to analyze opinions for emergent new topics that however tend to lack annotations, we present the first meta-learning approach for few-shot (dis)agreement identification that can be quickly applied to analyze opinions for new topics with few labeled instances. Furthermore, we enhance the meta-learner\u2019s domain generalization ability from two perspectives. The first is domain-invariant regularization, where we design a lexicon-based regularization loss to enable the meta-learner to learn domain-invariant cues. The second is domain-aware augmentation, where we propose domain-aware task augmentation for meta-training to learn domain-specific expressions. In addition to using an existing dataset, we also evaluate our approach on two very recent new topics, mask mandate and COVID vaccine, using our newly annotated datasets containing 1.5k and 1.4k SubReddits comment pairs respectively. 
Extensive experiments on three domains/topics demonstrate the effectiveness of our meta-learning approach.", + "author": "Yuanyuan Lei; Ruihong Huang", + "authorids": "/y/yuanyuan-lei/; /r/ruihong-huang/", + "bibtex": "@inproceedings{lei-huang-2022-shot,\n title = \"Few-Shot (Dis)Agreement Identification in Online Discussions with Regularized and Augmented Meta-Learning\",\n author = \"Lei, Yuanyuan and\n Huang, Ruihong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.409/\",\n doi = \"10.18653/v1/2022.findings-emnlp.409\",\n pages = \"5581--5593\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.409.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.409/", + "pdf_size": 312039, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7193369023862704979&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science and Engineering, Texas A&M University, College Station, TX; Department of Computer Science and Engineering, Texas A&M University, College Station, TX", + "aff_domain": "tamu.edu;tamu.edu", + "email": "tamu.edu;tamu.edu", + "github": "https://github.com/yuanyuanlei-nlp/fewshot_agreement_emnlp_2022", + "project": "https://github.com/yuanyuanlei-nlp/SubReddit_agreement_dataset", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Texas A&M University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.tamu.edu", + "aff_unique_abbr": "TAMU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "College Station", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + 
}, + { + "id": "2022.findings-emnlp.197", + "title": "Few-Shot Anaphora Resolution in Scientific Protocols via Mixtures of In-Context Experts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Anaphora resolution is an important task for information extraction across a range of languages, text genres, and domains, motivating the need for methods that do not require large annotated datasets. In-context learning has emerged as a promising approach, yet there are a number of challenges in applying in-context learning to resolve anaphora. For example, encoding a single in-context demonstration that consists of: an anaphor, a paragraph-length context, and a list of corresponding antecedents, requires conditioning a language model on a long sequence of tokens, limiting the number of demonstrations per prompt.In this paper, we present Mice (Mixtures of In-Context Experts), which we demonstrate is effective for few-shot anaphora resolution in scientific protocols. Given only a handful of training examples, Mice combines the predictions of hundreds of in-context experts, yielding a 30% increase in F1 score over a competitive prompt retrieval baseline. Furthermore, we show Mice can be used to train compact student models without sacrificing performance. As far as we are aware, this is the first work to present experimental results demonstrating the effectiveness of in-context learning on the task of few-shot anaphora resolution in scientific protocols.", + "author": "Nghia T. Le; Fan Bai; Alan Ritter", + "authorids": "/n/nghia-t-le/; /f/fan-bai/; /a/alan-ritter/", + "bibtex": "@inproceedings{le-etal-2022-shot,\n title = \"Few-Shot Anaphora Resolution in Scientific Protocols via Mixtures of In-Context Experts\",\n author = \"Le, Nghia T. 
and\n Bai, Fan and\n Ritter, Alan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.197/\",\n doi = \"10.18653/v1/2022.findings-emnlp.197\",\n pages = \"2693--2706\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.197.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.197/", + "pdf_size": 460764, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1666252823901870680&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/nle18/mice", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.255", + "title": "Few-Shot Out-of-Domain Transfer Learning of Natural Language Explanations in a Label-Abundant Setup", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Training a model to provide natural language explanations (NLEs) for its predictions usually requires the acquisition of task-specific NLEs, which is time- and resource-consuming. A potential solution is the few-shot out-of-domain transfer of NLEs from a parent task with many NLEs to a child task.In this work, we examine the setup in which the child task has few NLEs but abundant labels. We establish four few-shot transfer learning methods that cover the possible fine-tuning combinations of the labels and NLEs for the parent and child tasks. We transfer explainability from a large natural language inference dataset (e-SNLI) separately to two child tasks: (1) hard cases of pronoun resolution, where we introduce the small-e-WinoGrande dataset of NLEs on top of the WinoGrande dataset, and (2) commonsense validation (ComVE). 
Our results demonstrate that the parent task helps with NLE generation and we establish the best methods for this setup.", + "author": "Yordan Yordanov; Vid Kocijan; Thomas Lukasiewicz; Oana-Maria Camburu", + "authorids": "/y/yordan-yordanov/; /v/vid-kocijan/; /t/thomas-lukasiewicz/; /o/oana-maria-camburu/", + "bibtex": "@inproceedings{yordanov-etal-2022-shot,\n title = \"Few-Shot Out-of-Domain Transfer Learning of Natural Language Explanations in a Label-Abundant Setup\",\n author = \"Yordanov, Yordan and\n Kocijan, Vid and\n Lukasiewicz, Thomas and\n Camburu, Oana-Maria\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.255/\",\n doi = \"10.18653/v1/2022.findings-emnlp.255\",\n pages = \"3486--3501\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.255.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.255/", + "pdf_size": 1227483, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10733533946408227026&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 5, + "aff": "University of Oxford+TU Wien; Kumo.ai; TU Wien; University College London", + "aff_domain": "cs.ox.ac.uk;kumo.ai;tuwien.ac.at;cs.ucl.ac.uk", + "email": "cs.ox.ac.uk;kumo.ai;tuwien.ac.at;cs.ucl.ac.uk", + "github": "https://github.com/YDYordanov/Few-shot-NLEs", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;1;3", + "aff_unique_norm": "University of Oxford;Technische Universit\u00e4t Wien;Kumo.ai;University College London", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ox.ac.uk;https://www.tuwien.ac.at;https://www.kumo.ai;https://www.ucl.ac.uk", + "aff_unique_abbr": "Oxford;TU Wien;Kumo.ai;UCL", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;2;1;0", + "aff_country_unique": "United Kingdom;Austria;United States" + }, + { + "id": "2022.emnlp-main.616", + "title": "Few-shot Learning with Multilingual Generative Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large-scale generative language models such as GPT-3 are competitive few-shot learners. While these models are known to be able to jointly represent many different languages, their training data is dominated by English, potentially limiting their cross-lingual generalization. In this work, we train multilingual generative language models on a corpus covering a diverse set of languages, and study their few- and zero-shot learning capabilities in a wide range of tasks. Our largest model with 7.5 billion parameters sets new state of the art in few-shot learning in more than 20 representative languages, outperforming GPT-3 of comparable size in multilingual commonsense reasoning (with +7.4% absolute accuracy improvement in 0-shot settings and +9.4% in 4-shot settings) and natural language inference (+5.4% in each of 0-shot and 4-shot settings). On the FLORES-101 machine translation benchmark, our model outperforms GPT-3 on 171 out of 182 directions with 32 training examples, while surpassing the official supervised baseline in 45 directions. 
We conduct an in-depth analysis of different multilingual prompting approaches, showing in particular that strong few-shot learning performance across languages can be achieved via cross-lingual transfer through both templates and demonstration examples.", + "author": "Xi Victoria Lin; Todor Mihaylov; Mikel Artetxe; Tianlu Wang; Shuohui Chen; Daniel Simig; Myle Ott; Naman Goyal; Shruti Bhosale; Jingfei Du; Ramakanth Pasunuru; Sam Shleifer; Punit Singh Koura; Vishrav Chaudhary; Brian O\u2019Horo; Jeff Wang; Luke Zettlemoyer; Zornitsa Kozareva; Mona Diab; Veselin Stoyanov; Xian Li", + "authorids": "/x/xi-victoria-lin/; /t/todor-mihaylov/; /m/mikel-artetxe/; /t/tianlu-wang/; /s/shuohui-chen/; /d/daniel-simig/; /m/myle-ott/; /n/naman-goyal/; /s/shruti-bhosale/; /j/jingfei-du/; /r/ramakanth-pasunuru/; /s/sam-shleifer/; /p/punit-singh-koura/; /v/vishrav-chaudhary/; /b/brian-ohoro/; /j/jeff-wang/; /l/luke-zettlemoyer/; /z/zornitsa-kozareva/; /m/mona-diab/; /v/veselin-stoyanov/; /x/xian-li/", + "bibtex": "@inproceedings{lin-etal-2022-shot,\n title = \"Few-shot Learning with Multilingual Generative Language Models\",\n author = \"Lin, Xi Victoria and\n Mihaylov, Todor and\n Artetxe, Mikel and\n Wang, Tianlu and\n Chen, Shuohui and\n Simig, Daniel and\n Ott, Myle and\n Goyal, Naman and\n Bhosale, Shruti and\n Du, Jingfei and\n Pasunuru, Ramakanth and\n Shleifer, Sam and\n Koura, Punit Singh and\n Chaudhary, Vishrav and\n O{'}Horo, Brian and\n Wang, Jeff and\n Zettlemoyer, Luke and\n Kozareva, Zornitsa and\n Diab, Mona and\n Stoyanov, Veselin and\n Li, Xian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.616/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.616\",\n pages = \"9019--9052\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.616.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.616/", + "pdf_size": 1495514, + "gs_citation": 88, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4261991115496719344&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": "meta.com; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;diab.ws; ;meta.com", + "email": "meta.com; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ; ;diab.ws; ;meta.com", + "github": "https://github.com/facebookresearch/fairseq/tree/main/examples/xglm", + "project": "", + "author_num": 21, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.243", + "title": "Few-shot Query-Focused Summarization with Prefix-Merging", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Query-focused summarization has been considered as an important extension for text summarization. It aims to generate a concise highlight for a given query. Different from text summarization, query-focused summarization has long been plagued by the problem of lacking high-quality large-scale datasets. In this paper, we investigate the idea that whether we can integrate and transfer the knowledge of text summarization and question answering to assist the few-shot learning in query-focused summarization. 
Here, we propose prefix-merging, a prefix-based pretraining strategy for few-shot learning in query-focused summarization. Drawn inspiration from prefix-tuning, we are allowed to integrate the task knowledge from text summarization and question answering into a properly designed prefix and apply the merged prefix to query-focused summarization. With only a small amount of trainable parameters, prefix-merging outperforms fine-tuning on query-focused summarization. We further discuss the influence of different prefix designs and propose a visualized explanation for how prefix-merging works.", + "author": "Ruifeng Yuan; Zili Wang; Ziqiang Cao; Wenjie Li", + "authorids": "/r/ruifeng-yuan/; /z/zili-wang/; /z/ziqiang-cao/; /w/wenjie-li/", + "bibtex": "@inproceedings{yuan-etal-2022-shot,\n title = \"Few-shot Query-Focused Summarization with Prefix-Merging\",\n author = \"Yuan, Ruifeng and\n Wang, Zili and\n Cao, Ziqiang and\n Li, Wenjie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.243/\",\n doi = \"10.18653/v1/2022.emnlp-main.243\",\n pages = \"3704--3714\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.243.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.243/", + "pdf_size": 369145, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6410651573652674618&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "The Hong Kong Polytechnic University; Xidian University; Soochow University; The Hong Kong Polytechnic University", + "aff_domain": "comp.polyu.edu.hk;gmail.com;suda.edu.cn;comp.polyu.edu.hk", + "email": "comp.polyu.edu.hk;gmail.com;suda.edu.cn;comp.polyu.edu.hk", + "github": "", 
+ "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "The Hong Kong Polytechnic University;Xidian University;Soochow University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.polyu.edu.hk;http://www.xidian.edu.cn/;https://www.soochow.edu.cn", + "aff_unique_abbr": "PolyU;Xidian;Soochow U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.80", + "title": "Few-shot initializing of Active Learner via Meta-Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite the important evolutions in few-shot and zero-shot learning techniques, domain specific applications still require expert knowledge and significant effort in annotating and labeling a large volume of unstructured textual data. To mitigate this problem, active learning, and meta-learning attempt to reach a high performance with the least amount of labeled data. In this paper, we introduce a novel approach to combine both lines of work by initializing an active learner with meta-learned parameters obtained through meta-training on tasks similar to the target task during active learning. In this approach we use the pre-trained BERT as our text-encoder and meta-learn its parameters with LEOPARD, which extends the model-agnostic meta-learning method by generating task dependent softmax weights to enable learning across tasks with different number of classes. We demonstrate the effectiveness of our method by performing active learning on five natural language understanding tasks and six datasets with five different acquisition functions. We train two different meta-initializations, and we use the pre-trained BERT base initialization as baseline. We observe that our approach performs better than the baseline at low budget, especially when closely related tasks were present during meta-learning. 
Moreover, our results show that better performance in the initial phase, i.e., with fewer labeled samples, leads to better performance when larger acquisition batches are used. We also perform an ablation study of the proposed method, showing that active learning with only the meta-learned weights is beneficial and adding the meta-learned learning rates and generating the softmax have negative consequences for the performance.", + "author": "Zi Long Zhu; Vikrant Yadav; Zubair Afzal; George Tsatsaronis", + "authorids": "/z/zi-long-zhu/; /v/vikrant-yadav/; /z/zubair-afzal/; /g/george-tsatsaronis/", + "bibtex": "@inproceedings{zhu-etal-2022-shot,\n title = \"Few-shot initializing of Active Learner via Meta-Learning\",\n author = \"Zhu, Zi Long and\n Yadav, Vikrant and\n Afzal, Zubair and\n Tsatsaronis, George\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.80/\",\n doi = \"10.18653/v1/2022.findings-emnlp.80\",\n pages = \"1117--1133\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.80.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.80/", + "pdf_size": 564785, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17308098400494756300&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 2, + "aff": "Elsevier, the Netherlands; Elsevier, the Netherlands; Elsevier, the Netherlands; Elsevier, the Netherlands", + "aff_domain": "elsevier.com;elsevier.com;elsevier.com;elsevier.com", + "email": "elsevier.com;elsevier.com;elsevier.com;elsevier.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Elsevier", + "aff_unique_dep": "", + "aff_unique_url": 
"https://www.elsevier.com", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.emnlp-main.285", + "title": "FiE: Building a Global Probability Space by Leveraging Early Fusion in Encoder for Open-Domain Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Generative models have recently started to outperform extractive models in Open Domain Question Answering, largely by leveraging their decoder to attend over multiple encoded passages and combining their information. However, generative models tend to be larger than extractive models due to the need for a decoder, run slower during inference due to auto-regressive decoder beam search, and their generated output often suffers from hallucinations. We propose to extend transformer encoders with the ability to fuse information from multiple passages, using global representation to provide cross-sample attention over all tokens across samples. Furthermore, we propose an alternative answer span probability calculation to better aggregate answer scores in the global space of all samples. Using our proposed method, we outperform the current state-of-the-art method by 2.5 Exact Match score on the Natural Question dataset while using only 25% of parameters and 35% of the latency during inference, and 4.4 Exact Match on WebQuestions dataset. When coupled with synthetic data augmentation, we outperform larger models on the TriviaQA dataset as well. 
The latency and parameter savings of our method make it particularly attractive for open-domain question answering, as these models are often compute-intensive.", + "author": "Akhil Kedia; Mohd Abbas Zaidi; Haejun Lee", + "authorids": "/a/akhil-kedia/; /m/mohd-abbas-zaidi/; /h/haejun-lee/", + "bibtex": "@inproceedings{kedia-etal-2022-fie,\n title = \"{F}i{E}: Building a Global Probability Space by Leveraging Early Fusion in Encoder for Open-Domain Question Answering\",\n author = \"Kedia, Akhil and\n Zaidi, Mohd Abbas and\n Lee, Haejun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.285/\",\n doi = \"10.18653/v1/2022.emnlp-main.285\",\n pages = \"4246--4260\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.285.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.285/", + "pdf_size": 866134, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6115501184466310666&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Samsung Research, Seoul; Samsung Research, Seoul; Samsung Research, Seoul", + "aff_domain": "samsung.com;samsung.com;samsung.com", + "email": "samsung.com;samsung.com;samsung.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Samsung Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.samsung.com/global/research/", + "aff_unique_abbr": "Samsung", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seoul", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.476", + "title": "FigMemes: A Dataset for Figurative Language Identification 
in Politically-Opinionated Memes", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Real-world politically-opinionated memes often rely on figurative language to cloak propaganda and radical ideas to help them spread. It is not only a scientific challenge to develop machine learning models to recognize them in memes, but also sociologically beneficial to understand hidden meanings at scale and raise awareness. These memes are fast-evolving (in both topics and visuals) and it remains unclear whether current multimodal machine learning models are robust to such distribution shifts. To enable future research into this area, we first present FigMemes, a dataset for figurative language classification in politically-opinionated memes. We evaluate the performance of state-of-the-art unimodal and multimodal models and provide comprehensive benchmark results. The key contributions of this proposed dataset include annotations of six commonly used types of figurative language in politically-opinionated memes, and a wide range of topics and visual styles.We also provide analyses on the ability of multimodal models to generalize across distribution shifts in memes. 
Our dataset poses unique machine learning challenges and our results show that current models have significant room for improvement in both performance and robustness to distribution shifts.", + "author": "Chen Liu; Gregor Geigle; Robin Krebs; Iryna Gurevych", + "authorids": "/c/chen-liu/; /g/gregor-geigle/; /r/robin-krebs/; /i/iryna-gurevych/", + "bibtex": "@inproceedings{liu-etal-2022-figmemes,\n title = \"{F}ig{M}emes: A Dataset for Figurative Language Identification in Politically-Opinionated Memes\",\n author = \"Liu, Chen and\n Geigle, Gregor and\n Krebs, Robin and\n Gurevych, Iryna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.476/\",\n doi = \"10.18653/v1/2022.emnlp-main.476\",\n pages = \"7069--7086\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.476.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.476/", + "pdf_size": 2157424, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12795090842989223407&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University of Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University of Darmstadt + W\u00fcNLP & Computer Vision Lab, CAIDAS, University of W\u00fcrzburg; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University of Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian 
Center for AI (hessian.AI) + Technical University of Darmstadt", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/UKPLab/emnlp2022-figmemes", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+2;0+1+2+3;0+1+2;0+1+2", + "aff_unique_norm": "University of Duisburg-Essen;University of Massachusetts Amherst;Technical University of Darmstadt;University of W\u00fcrzburg", + "aff_unique_dep": "Ubiquitous Knowledge Processing Lab;Department of Computer Science;;W\u00fcNLP & Computer Vision Lab, CAIDAS", + "aff_unique_url": "https://www.ukp.tu-darmstadt.de/;https://www.cics.umass.edu;https://www.tu-darmstadt.de;https://www.uni-wuerzburg.de", + "aff_unique_abbr": "UKP Lab;UMass CS;TUD;UWue", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+0;0+1+0+0;0+1+0;0+1+0", + "aff_country_unique": "Germany;United States" + }, + { + "id": "2022.findings-emnlp.399", + "title": "Find Someone Who: Visual Commonsense Understanding in Human-Centric Grounding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "From a visual scene containing multiple people, human is able to distinguish each individual given the context descriptions about what happened before, their mental/physical states or intentions, etc. Above ability heavily relies on human-centric commonsense knowledge and reasoning. For example, if asked to identify the \u201cperson who needs healing\u201d in an image, we need to first know that they usually have injuries or suffering expressions, then find the corresponding visual clues before finally grounding the person. We present a new commonsense task, Human-centric Commonsense Grounding, that tests the models\u2019 ability to ground individuals given the context descriptions about what happened before, and their mental/physical states or intentions. 
We further create a benchmark, HumanCog, a dataset with 130k grounded commonsensical descriptions annotated on 67k images, covering diverse types of commonsense and visual scenes. We set up a context-object-aware method as a strong baseline that outperforms previous pre-trained and non-pretrained models. Further analysis demonstrates that rich visual commonsense and powerful integration of multi-modal commonsense are essential, which sheds light on future works. Data and code will be available at https://github.com/Hxyou/HumanCog.", + "author": "Haoxuan You; Rui Sun; Zhecan Wang; Kai-Wei Chang; Shih-Fu Chang", + "authorids": "/h/haoxuan-you/; /r/rui-sun/; /z/zhecan-wang/; /k/kai-wei-chang/; /s/shih-fu-chang/", + "bibtex": "@inproceedings{you-etal-2022-find,\n title = \"Find Someone Who: Visual Commonsense Understanding in Human-Centric Grounding\",\n author = \"You, Haoxuan and\n Sun, Rui and\n Wang, Zhecan and\n Chang, Kai-Wei and\n Chang, Shih-Fu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.399/\",\n doi = \"10.18653/v1/2022.findings-emnlp.399\",\n pages = \"5444--5454\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.399.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.399/", + "pdf_size": 2546517, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5922552971910412381&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Columbia University; Columbia University; Columbia University; University of California, Los Angeles; Columbia University", + "aff_domain": "columbia.edu;columbia.edu;columbia.edu;cs.ucla.edu;columbia.edu", + "email": 
"columbia.edu;columbia.edu;columbia.edu;cs.ucla.edu;columbia.edu", + "github": "https://github.com/Hxyou/HumanCog", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Columbia University;University of California, Los Angeles", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.columbia.edu;https://www.ucla.edu", + "aff_unique_abbr": "Columbia;UCLA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.293", + "title": "Finding Dataset Shortcuts with Grammar Induction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Many NLP datasets have been found to contain shortcuts: simple decision rules that achieve surprisingly high accuracy. However, it is difficult to discover shortcuts automatically. Prior work on automatic shortcut detection has focused on enumerating features like unigrams or bigrams, which can find only low-level shortcuts, or relied on post-hoc model interpretability methods like saliency maps, which reveal qualitative patterns without a clear statistical interpretation. In this work, we propose to use probabilistic grammars to characterize and discover shortcuts in NLP datasets. Specifically, we use a context-free grammar to model patterns in sentence classification datasets and use a synchronous context-free grammar to model datasets involving sentence pairs. The resulting grammars reveal interesting shortcut features in a number of datasets, including both simple and high-level features, and automatically identify groups of test examples on which conventional classifiers fail. 
Finally, we show that the features we discover can be used to generate diagnostic contrast examples and incorporated into standard robust optimization methods to improve worst-group accuracy.", + "author": "Dan Friedman; Alexander Wettig; Danqi Chen", + "authorids": "/d/dan-friedman/; /a/alexander-wettig/; /d/danqi-chen/", + "bibtex": "@inproceedings{friedman-etal-2022-finding,\n title = \"Finding Dataset Shortcuts with Grammar Induction\",\n author = \"Friedman, Dan and\n Wettig, Alexander and\n Chen, Danqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.293/\",\n doi = \"10.18653/v1/2022.emnlp-main.293\",\n pages = \"4345--4363\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.293.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.293/", + "pdf_size": 492932, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1027486041989629353&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, Princeton University; Department of Computer Science, Princeton University; Department of Computer Science, Princeton University", + "aff_domain": "cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "email": "cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "github": "https://github.com/princeton-nlp/ShortcutGrammar", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Princeton University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.princeton.edu", + "aff_unique_abbr": "Princeton", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.378", + "title": "Finding Memo: Extractive Memorization in Constrained Sequence Generation Tasks", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Memorization presents a challenge for several constrained Natural Language Generation (NLG) tasks such as Neural Machine Translation (NMT), wherein the proclivity of neural models to memorize noisy and atypical samples reacts adversely with the noisy (web crawled) datasets. However, previous studies of memorization in constrained NLG tasks have only focused on counterfactual memorization, linking it to the problem of hallucinations. In this work, we propose a new, inexpensive algorithm for extractive memorization (exact training data generation under insufficient context) in constrained sequence generation tasks and use it to study extractive memorization and its effects in NMT. We demonstrate that extractive memorization poses a serious threat to NMT reliability by qualitatively and quantitatively characterizing the memorized samples as well as the model behavior in their vicinity. Based on empirical observations, we develop a simple algorithm which elicits non-memorized translations of memorized samples from the same model, for a large fraction of such samples. Finally, we show that the proposed algorithm could also be leveraged to mitigate memorization in the model through finetuning. 
We have released the code to reproduce our results at https://github.com/vyraun/Finding-Memo.", + "author": "Vikas Raunak; Arul Menezes", + "authorids": "/v/vikas-raunak/; /a/arul-menezes/", + "bibtex": "@inproceedings{raunak-menezes-2022-finding,\n title = \"Finding Memo: Extractive Memorization in Constrained Sequence Generation Tasks\",\n author = \"Raunak, Vikas and\n Menezes, Arul\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.378/\",\n doi = \"10.18653/v1/2022.findings-emnlp.378\",\n pages = \"5153--5162\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.378.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.378/", + "pdf_size": 218539, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14032508412882467824&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "https://github.com/vyraun/Finding-Memo", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.765", + "title": "Finding Skill Neurons in Pre-trained Transformer-based Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based pre-trained language models have demonstrated superior performance on various natural language processing tasks. However, it remains unclear how the skills required to handle these tasks distribute among model parameters. In this paper, we find that after prompt tuning for specific tasks, the activations of some neurons within pre-trained Transformers are highly predictive of the task labels. 
We dub these neurons skill neurons and confirm they encode task-specific skills by finding that: (1) Skill neurons are crucial for handling tasks. Performances of pre-trained Transformers on a task significantly drop when corresponding skill neurons are perturbed. (2) Skill neurons are task-specific. Similar tasks tend to have similar distributions of skill neurons. Furthermore, we demonstrate the skill neurons are most likely generated in pre-training rather than fine-tuning by showing that the skill neurons found with prompt tuning are also crucial for other fine-tuning methods freezing neuron weights, such as the adapter-based tuning and BitFit. We also explore the applications of skill neurons, including accelerating Transformers with network pruning and building better transferability indicators. These findings may promote further research on understanding Transformers. The source code can be obtained from https://github.com/THU-KEG/Skill-Neuron.", + "author": "Xiaozhi Wang; Kaiyue Wen; Zhengyan Zhang; Lei Hou; Zhiyuan Liu; Juanzi Li", + "authorids": "/x/xiaozhi-wang/; /k/kaiyue-wen/; /z/zhengyan-zhang/; /l/lei-hou/; /z/zhiyuan-liu/; /j/juanzi-li/", + "bibtex": "@inproceedings{wang-etal-2022-finding-skill,\n title = \"Finding Skill Neurons in Pre-trained Transformer-based Language Models\",\n author = \"Wang, Xiaozhi and\n Wen, Kaiyue and\n Zhang, Zhengyan and\n Hou, Lei and\n Liu, Zhiyuan and\n Li, Juanzi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.765/\",\n doi = \"10.18653/v1/2022.emnlp-main.765\",\n pages = \"11132--11152\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.765.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.765/", + "pdf_size": 1310488, + "gs_citation": 83, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5742174133502662562&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/THU-KEG/Skill-Neuron", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.697", + "title": "Fine-Tuning Pre-trained Transformers into Decaying Fast Weights", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Autoregressive Transformers are strong language models but incur O(T) complexity during per-token generation due to the self-attention mechanism. Recent work proposes kernel-based methods to approximate causal self-attention by replacing it with recurrent formulations with various update rules and feature maps to achieve O(1) time and memory complexity. We explore these approaches and find that they are unnecessarily complex, and propose a simple alternative - decaying fast weights - that runs fast on GPU, outperforms prior methods, and retains 99% of attention\u2019s performance for GPT-2. 
We also show competitive performance on WikiText-103 against more complex attention substitutes.", + "author": "Huanru Henry Mao", + "authorids": "/h/huanru-henry-mao/", + "bibtex": "@inproceedings{mao-2022-fine,\n title = \"Fine-Tuning Pre-trained Transformers into Decaying Fast Weights\",\n author = \"Mao, Huanru Henry\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.697/\",\n doi = \"10.18653/v1/2022.emnlp-main.697\",\n pages = \"10236--10242\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.697.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.697/", + "pdf_size": 292763, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17668529188237156913&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 1 + }, + { + "id": "2022.emnlp-main.85", + "title": "Fine-grained Category Discovery under Coarse-grained supervision with Hierarchical Weighted Self-contrastive Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Novel category discovery aims at adapting models trained on known categories to novel categories. Previous works only focus on the scenario where known and novel categories are of the same granularity.In this paper, we investigate a new practical scenario called Fine-grained Category Discovery under Coarse-grained supervision (FCDC). FCDC aims at discovering fine-grained categories with only coarse-grained labeled data, which can adapt models to categories of different granularity from known ones and reduce significant labeling cost. 
It is also a challenging task since supervised training on coarse-grained categories tends to focus on inter-class distance (distance between coarse-grained classes) but ignore intra-class distance (distance between fine-grained sub-classes) which is essential for separating fine-grained categories.Considering most current methods cannot transfer knowledge from coarse-grained level to fine-grained level, we propose a hierarchical weighted self-contrastive network by building a novel weighted self-contrastive module and combining it with supervised learning in a hierarchical manner.Extensive experiments on public datasets show both effectiveness and efficiency of our model over compared methods.", + "author": "Wenbin An; Feng Tian; Ping Chen; Siliang Tang; Qinghua Zheng; QianYing Wang", + "authorids": "/w/wenbin-an/; /f/feng-tian/; /p/ping-chen/; /s/siliang-tang/; /q/qinghua-zheng/; /q/qianying-wang/", + "bibtex": "@inproceedings{an-etal-2022-fine,\n title = \"Fine-grained Category Discovery under Coarse-grained supervision with Hierarchical Weighted Self-contrastive Learning\",\n author = \"An, Wenbin and\n Tian, Feng and\n Chen, Ping and\n Tang, Siliang and\n Zheng, Qinghua and\n Wang, QianYing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.85/\",\n doi = \"10.18653/v1/2022.emnlp-main.85\",\n pages = \"1314--1323\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.85.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.85/", + "pdf_size": 1591254, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17865137003599060982&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";;;;;", + 
"aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/Lackel/Hierarchical_Weighted_SCL", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.71", + "title": "Fine-grained Contrastive Learning for Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent relation extraction (RE) works have shown encouraging improvements by conducting contrastive learning on silver labels generated by distant supervision before fine-tuning on gold labels. Existing methods typically assume all these silver labels are accurate and treat them equally; however, distant supervision is inevitably noisy\u2013some silver labels are more reliable than others. In this paper, we propose fine-grained contrastive learning (FineCL) for RE, which leverages fine-grained information about which silver labels are and are not noisy to improve the quality of learned relationship representations for RE. We first assess the quality of silver labels via a simple and automatic approach we call \u201clearning order denoising,\u201d where we train a language model to learn these relations and record the order of learned training instances. We show that learning order largely corresponds to label accuracy\u2013early-learned silver labels have, on average, more accurate labels than later-learned silver labels. Then, during pre-training, we increase the weights of accurate labels within a novel contrastive learning objective. 
Experiments on several RE benchmarks show that FineCL makes consistent and significant performance gains over state-of-the-art methods.", + "author": "William Hogan; Jiacheng Li; Jingbo Shang", + "authorids": "/w/william-hogan/; /j/jiacheng-li/; /j/jingbo-shang/", + "bibtex": "@inproceedings{hogan-etal-2022-fine,\n title = \"Fine-grained Contrastive Learning for Relation Extraction\",\n author = \"Hogan, William and\n Li, Jiacheng and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.71/\",\n doi = \"10.18653/v1/2022.emnlp-main.71\",\n pages = \"1083--1095\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.71.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.71/", + "pdf_size": 920998, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14250746685045667104&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "Department of Computer Science & Engineering, University of California, San Diego; Department of Computer Science & Engineering, University of California, San Diego; Department of Computer Science & Engineering, University of California, San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "Department of Computer Science & Engineering", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" 
+ }, + { + "id": "2022.findings-emnlp.26", + "title": "Fine-mixing: Mitigating Backdoors in Fine-tuned Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Deep Neural Networks (DNNs) are known to be vulnerable to backdoor attacks. In Natural Language Processing (NLP), DNNs are often backdoored during the fine-tuning process of a large-scale Pre-trained Language Model (PLM) with poisoned samples. Although the clean weights of PLMs are readily available, existing methods have ignored this information in defending NLP models against backdoor attacks. In this work, we take the first step to exploit the pre-trained (unfine-tuned) weights to mitigate backdoors in fine-tuned language models. Specifically, we leverage the clean pre-trained weights via two complementary techniques: (1) a two-step Fine-mixing technique, which first mixes the backdoored weights (fine-tuned on poisoned data) with the pre-trained weights, then fine-tunes the mixed weights on a small subset of clean data; (2) an Embedding Purification (E-PUR) technique, which mitigates potential backdoors existing in the word embeddings. We compare Fine-mixing with typical backdoor mitigation methods on three single-sentence sentiment classification tasks and two sentence-pair classification tasks and show that it outperforms the baselines by a considerable margin in all scenarios. We also show that our E-PUR method can benefit existing mitigation methods. 
Our work establishes a simple but strong baseline defense for secure fine-tuned NLP models against backdoor attacks.", + "author": "Zhiyuan Zhang; Lingjuan Lyu; Xingjun Ma; Chenguang Wang; Xu Sun", + "authorids": "/z/zhiyuan-zhang/; /l/lingjuan-lyu/; /x/xingjun-ma/; /c/chenguang-wang/; /x/xu-sun/", + "bibtex": "@inproceedings{zhang-etal-2022-fine-mixing,\n title = \"Fine-mixing: Mitigating Backdoors in Fine-tuned Language Models\",\n author = \"Zhang, Zhiyuan and\n Lyu, Lingjuan and\n Ma, Xingjun and\n Wang, Chenguang and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.26/\",\n doi = \"10.18653/v1/2022.findings-emnlp.26\",\n pages = \"355--372\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.26.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.26/", + "pdf_size": 1236549, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9857605012138863457&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff": "MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University; Sony AI; Fudan University; Washington University in St. Louis; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "pku.edu.cn;sony.com;fudan.edu.cn;wustl.edu;pku.edu.cn", + "email": "pku.edu.cn;sony.com;fudan.edu.cn;wustl.edu;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Peking University;Sony;Fudan University;Washington University in St. 
Louis", + "aff_unique_dep": "School of Computer Science;Sony AI;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.sony.com;https://www.fudan.edu.cn;https://wustl.edu", + "aff_unique_abbr": "PKU;Sony AI;Fudan;WashU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";St. Louis", + "aff_country_unique_index": "0;1;0;2;0", + "aff_country_unique": "China;Japan;United States" + }, + { + "id": "2022.emnlp-main.410", + "title": "Fine-tuned Language Models are Continual Learners", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work on large language models relies on the intuition that most natural language processing tasks can be described via natural language instructions and that models trained on these instructions show strong zero-shot performance on several standard datasets. However, these models even though impressive still perform poorly on a wide range of tasks outside of their respective training and evaluation sets.To address this limitation, we argue that a model should be able to keep extending its knowledge and abilities, without forgetting previous skills. In spite of the limited success of Continual Learning, we show that Fine-tuned Language Models can be continual learners.We empirically investigate the reason for this success and conclude that Continual Learning emerges from self-supervision pre-training. Our resulting model Continual-T0 (CT0) is able to learn 8 new diverse language generation tasks, while still maintaining good performance on previous tasks, spanning in total of 70 datasets. 
Finally, we show that CT0 is able to combine instructions in ways it was never trained for, demonstrating some level of instruction compositionality.", + "author": "Thomas Scialom; Tuhin Chakrabarty; Smaranda Muresan", + "authorids": "/t/thomas-scialom/; /t/tuhin-chakrabarty/; /s/smaranda-muresan/", + "bibtex": "@inproceedings{scialom-etal-2022-fine,\n title = \"Fine-tuned Language Models are Continual Learners\",\n author = \"Scialom, Thomas and\n Chakrabarty, Tuhin and\n Muresan, Smaranda\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.410/\",\n doi = \"10.18653/v1/2022.emnlp-main.410\",\n pages = \"6107--6122\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.410.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.410/", + "pdf_size": 635396, + "gs_citation": 108, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5220201697603329224&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Meta AI; Department of Computer Science, Columbia University; Department of Computer Science, Columbia University", + "aff_domain": "fb.com;cs.columbia.edu;cs.columbia.edu", + "email": "fb.com;cs.columbia.edu;cs.columbia.edu", + "github": "https://github.com/ThomasScialom/T0_continual_learning", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Meta Platforms, Inc.;Columbia University", + "aff_unique_dep": "Meta AI;Department of Computer Science", + "aff_unique_url": "https://meta.com;https://www.columbia.edu", + "aff_unique_abbr": "Meta;Columbia", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United 
States" + }, + { + "id": "2022.emnlp-main.220", + "title": "FineD-Eval: Fine-grained Automatic Dialogue-Level Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent model-based reference-free metrics for open-domain dialogue evaluation exhibit promising correlations with human judgment. However, they either perform turn-level evaluation or look at a single dialogue quality dimension. One would expect a good evaluation metric to assess multiple quality dimensions at the dialogue level. To this end, we are motivated to propose a multi-dimensional dialogue-level metric, which consists of three sub-metrics with each targeting a specific dimension. The sub-metrics are trained with novel self-supervised objectives and exhibit strong correlations with human judgment for their respective dimensions. Moreover, we explore two approaches to combine the sub-metrics: metric ensemble and multitask learning. Both approaches yield a holistic metric that significantly outperforms individual sub-metrics. 
Compared to the existing state-of-the-art metric, the combined metrics achieve around 16% relative improvement on average across three high-quality dialogue-level evaluation benchmarks.", + "author": "Chen Zhang; Luis Fernando D\u2019Haro; Qiquan Zhang; Thomas Friedrichs; Haizhou Li", + "authorids": "/c/chen-zhang/; /l/luis-fernando-dharo/; /q/qiquan-zhang/; /t/thomas-friedrichs/; /h/haizhou-li/", + "bibtex": "@inproceedings{zhang-etal-2022-fined,\n title = \"{F}ine{D}-Eval: Fine-grained Automatic Dialogue-Level Evaluation\",\n author = \"Zhang, Chen and\n D{'}Haro, Luis Fernando and\n Zhang, Qiquan and\n Friedrichs, Thomas and\n Li, Haizhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.220/\",\n doi = \"10.18653/v1/2022.emnlp-main.220\",\n pages = \"3336--3355\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.220.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.220/", + "pdf_size": 504768, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7529705085990558967&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "National University of Singapore+Kriston AI Lab; Universidad Polit\u00e9cnica de Madrid, Spain; National University of Singapore; Robert Bosch (SEA), Singapore+The Chinese University of Hong Kong, Shenzhen, China; National University of Singapore+The Chinese University of Hong Kong, Shenzhen, China+Kriston AI Lab", + "aff_domain": "u.nus.edu; ; ; ; ", + "email": "u.nus.edu; ; ; ; ", + "github": "https://github.com/e0397123/FineD-Eval", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;0;3+4;0+4+1", + "aff_unique_norm": "National University of 
Singapore;Kriston AI Lab;Universidad Polit\u00e9cnica de Madrid;Robert Bosch;The Chinese University of Hong Kong", + "aff_unique_dep": ";AI Lab;;;", + "aff_unique_url": "https://www.nus.edu.sg;;https://www.upm.es;https://www.bosch.com;https://www.cuhk.edu.cn", + "aff_unique_abbr": "NUS;;UPM;Bosch;CUHK", + "aff_campus_unique_index": ";1+2;2", + "aff_campus_unique": ";Singapore;Shenzhen", + "aff_country_unique_index": "0;2;0;0+3;0+3", + "aff_country_unique": "Singapore;;Spain;China" + }, + { + "id": "2022.emnlp-main.797", + "title": "Fixing Model Bugs with Natural Language Patches", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current approaches for fixing systematic problems in NLP models (e.g., regex patches, finetuning on more data) are either brittle, or labor-intensive and liable to shortcuts. In contrast, humans often provide corrections to each other through natural language. Taking inspiration from this, we explore natural language patches\u2014declarative statements that allow developers to provide corrective feedback at the right level of abstraction, either overriding the model (\u201cif a review gives 2 stars, the sentiment is negative\u201d) or providing additional information the model may lack (\u201cif something is described as the bomb, then it is good\u201d). We model the task of determining if a patch applies separately from the task of integrating patch information, and show that with a small amount of synthetic data, we can teach models to effectively use real patches on real data\u20141 to 7 patches improve accuracy by ~1\u20134 accuracy points on different slices of a sentiment analysis dataset, and F1 by 7 points on a relation extraction dataset. 
Finally, we show that finetuning on as many as 100 labeled examples may be needed to match the performance of a small set of language patches.", + "author": "Shikhar Murty; Christopher Manning; Scott Lundberg; Marco Tulio Ribeiro", + "authorids": "/s/shikhar-murty/; /c/christopher-d-manning/; /s/scott-lundberg/; /m/marco-tulio-ribeiro/", + "bibtex": "@inproceedings{murty-etal-2022-fixing,\n title = \"Fixing Model Bugs with Natural Language Patches\",\n author = \"Murty, Shikhar and\n Manning, Christopher and\n Lundberg, Scott and\n Ribeiro, Marco Tulio\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.797/\",\n doi = \"10.18653/v1/2022.emnlp-main.797\",\n pages = \"11600--11613\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.797.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.797/", + "pdf_size": 913302, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=681562161405372251&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 6, + "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University; Microsoft Research; Microsoft Research", + "aff_domain": "cs.stanford.edu;cs.stanford.edu;microsoft.com;microsoft.com", + "email": "cs.stanford.edu;cs.stanford.edu;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "Stanford University;Microsoft Corporation", + "aff_unique_dep": "Computer Science Department;Microsoft Research", + "aff_unique_url": "https://www.stanford.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "Stanford;MSR", + 
"aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.715", + "title": "FlowEval: A Consensus-Based Dialogue Evaluation Framework Using Segment Act Flows", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite recent progress in open-domain dialogue evaluation, how to develop automatic metrics remains an open problem. We explore the potential of dialogue evaluation featuring dialog act information, which was hardly explicitly modeled in previous methods. However, defined at the utterance level in general, dialog act is of coarse granularity, as an utterance can contain multiple segments possessing different functions. Hence, we propose segment act, an extension of dialog act from utterance level to segment level, and crowdsource a large-scale dataset for it. To utilize segment act flows, sequences of segment acts, for evaluation, we develop the first consensus-based dialogue evaluation framework, FlowEval. This framework provides a reference-free approach for dialog evaluation by finding pseudo-references. 
Extensive experiments against strong baselines on three benchmark datasets demonstrate the effectiveness and other desirable characteristics of our FlowEval, pointing out a potential path for better dialogue evaluation.", + "author": "Jianqiao Zhao; Yanyang Li; Wanyu Du; Yangfeng Ji; Dong Yu; Michael Lyu; Liwei Wang", + "authorids": "/j/jianqiao-zhao/; /y/yanyang-li/; /w/wanyu-du/; /y/yangfeng-ji/; /d/dong-yu/; /m/michael-lyu/; /l/liwei-wang/", + "bibtex": "@inproceedings{zhao-etal-2022-floweval,\n title = \"{F}low{E}val: A Consensus-Based Dialogue Evaluation Framework Using Segment Act Flows\",\n author = \"Zhao, Jianqiao and\n Li, Yanyang and\n Du, Wanyu and\n Ji, Yangfeng and\n Yu, Dong and\n Lyu, Michael and\n Wang, Liwei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.715/\",\n doi = \"10.18653/v1/2022.emnlp-main.715\",\n pages = \"10469--10483\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.715.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.715/", + "pdf_size": 1473685, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12066583814635528325&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Shanghai AI Laboratory; Department of Computer Science, University of Virginia; Tencent AI Lab, Bellevue; Department of Computer Science and Engineering, The Chinese University of Hong Kong; Department of Computer Science and Engineering, The Chinese University of Hong Kong + Shanghai AI Laboratory", + "aff_domain": 
"cse.cuhk.edu.hk;cse.cuhk.edu.hk; ; ; ;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk; ; ; ;cse.cuhk.edu.hk;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;3;0;0+1", + "aff_unique_norm": "The Chinese University of Hong Kong;Shanghai AI Laboratory;University of Virginia;Tencent", + "aff_unique_dep": "Department of Computer Science and Engineering;;Department of Computer Science;AI Lab", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.shanghai-ai-lab.com;https://www.virginia.edu;https://ai.tencent.com", + "aff_unique_abbr": "CUHK;SAIL;UVA;Tencent AI Lab", + "aff_campus_unique_index": "0;0;2;0;0", + "aff_campus_unique": "Hong Kong;;Bellevue", + "aff_country_unique_index": "0;0;0;1;1;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.450", + "title": "Focus! Relevant and Sufficient Context Selection for News Image Captioning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "News Image Captioning requires describing an image by leveraging additional context derived from a news article. Previous works only coarsely leverage the article to extract the necessary context, which makes it challenging for models to identify relevant events and named entities. In our paper, we first demonstrate that by combining more fine-grained context that captures the key named entities (obtained via an oracle) and the global context that summarizes the news, we can dramatically improve the model\u2019s ability to generate accurate news captions. This begs the question, how to automatically extract such key entities from an image? We propose to use pre-trained vision and language retrieval model CLIP to localize the visually grounded entities in the news article, and then capture the non-visual entities via a open relation extraction model. 
Our experiments demonstrate that by simply selecting better context from the article, we can significantly improve the performance of existing models and achieve the new state-of-the-art performance on multiple benchmarks.", + "author": "Mingyang Zhou; Grace Luo; Anna Rohrbach; Zhou Yu", + "authorids": "/m/mingyang-zhou/; /g/grace-luo/; /a/anna-rohrbach/; /z/zhou-yu/", + "bibtex": "@inproceedings{zhou-etal-2022-focus,\n title = \"Focus! Relevant and Sufficient Context Selection for News Image Captioning\",\n author = \"Zhou, Mingyang and\n Luo, Grace and\n Rohrbach, Anna and\n Yu, Zhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.450/\",\n doi = \"10.18653/v1/2022.findings-emnlp.450\",\n pages = \"6078--6088\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.450.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.450/", + "pdf_size": 1267612, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17826626324730365111&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of California, Davis; University of California, Berkeley; University of California, Berkeley; Columbia University", + "aff_domain": "ucdavis.edu;berkeley.edu;berkeley.edu;columbia.edu", + "email": "ucdavis.edu;berkeley.edu;berkeley.edu;columbia.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "University of California, Davis;University of California, Berkeley;Columbia University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucdavis.edu;https://www.berkeley.edu;https://www.columbia.edu", + "aff_unique_abbr": "UC Davis;UC 
Berkeley;Columbia", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Davis;Berkeley;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.381", + "title": "FocusQA: Open-Domain Question Answering with a Context in Focus", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We introduce question answering with a cotext in focus, a task that simulates a free interaction with a QA system. The user reads on a screen some information about a topic, and they can follow-up with questions that can be either related or not to the topic; and the answer can be found in the document containing the screen content or from other pages. We call such information context. To study the task, we construct FocusQA, a dataset for answer sentence selection (AS2) with 12,165011unique question/context pairs, and a total of 109,940 answers. To build the dataset, we developed a novel methodology that takes existing questions and pairs them with relevant contexts. To show the benefits of this approach, we present a comparative analysis with a set of questions written by humans after reading the context, showing that our approach greatly helps in eliciting more realistic question/context pairs. Finally, we show that the task poses several challenges for incorporating contextual information. 
In this respect, we introduce strong baselines for answer sentence selection that outperform the precision of state-of-the-art models for AS2 up to 21.3% absolute points.", + "author": "Gianni Barlacchi; Ivano Lauriola; Alessandro Moschitti; Marco Del Tredici; Xiaoyu Shen; Thuy Vu; Bill Byrne; Adri\u00e0 de Gispert", + "authorids": "/g/gianni-barlacchi/; /i/ivano-lauriola/; /a/alessandro-moschitti/; /m/marco-del-tredici/; /x/xiaoyu-shen/; /t/thuy-vu/; /b/bill-byrne/; /a/adria-de-gispert/", + "bibtex": "@inproceedings{barlacchi-etal-2022-focusqa,\n title = \"{F}ocus{QA}: Open-Domain Question Answering with a Context in Focus\",\n author = \"Barlacchi, Gianni and\n Lauriola, Ivano and\n Moschitti, Alessandro and\n Del Tredici, Marco and\n Shen, Xiaoyu and\n Vu, Thuy and\n Byrne, Bill and\n de Gispert, Adri{\\`a}\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.381/\",\n doi = \"10.18653/v1/2022.findings-emnlp.381\",\n pages = \"5195--5208\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.381.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.381/", + "pdf_size": 1099384, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13461830836715136235&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 8, + 
"aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Alexa AI", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.435", + "title": "Foiling Training-Time Attacks on Neural Machine Translation Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural machine translation (NMT) systems are vulnerable to backdoor attacks, whereby an attacker injects poisoned samples into training such that a trained model produces malicious translations. Nevertheless, there is little research on defending against such backdoor attacks in NMT. In this paper, we first show that backdoor attacks that have been successful in text classification are also effective against machine translation tasks. We then present a novel defence method that exploits a key property of most backdoor attacks: namely the asymmetry between the source and target language sentences, which is used to facilitate malicious text insertions, substitutions and suchlike. Our technique uses word alignment coupled with language model scoring to detect outlier tokens, and thus can find and filter out training instances which may contain backdoors. 
Experimental results demonstrate that our technique can significantly reduce the success of various attacks by up to 89.0%, while not affecting predictive accuracy.", + "author": "Jun Wang; Xuanli He; Benjamin Rubinstein; Trevor Cohn", + "authorids": "/j/jun-wang/; /x/xuanli-he/; /b/benjamin-rubinstein/; /t/trevor-cohn/", + "bibtex": "@inproceedings{wang-etal-2022-foiling,\n title = \"Foiling Training-Time Attacks on Neural Machine Translation Systems\",\n author = \"Wang, Jun and\n He, Xuanli and\n Rubinstein, Benjamin and\n Cohn, Trevor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.435/\",\n doi = \"10.18653/v1/2022.findings-emnlp.435\",\n pages = \"5906--5913\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.435.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.435/", + "pdf_size": 261504, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4145538127874072254&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 0, + "aff": "University of Melbourne, Australia; University of Melbourne, Australia; University of Melbourne, Australia; University of Melbourne, Australia", + "aff_domain": "student.unimelb.edu.au;monash.edu;unimelb.edu.au;unimelb.edu.au", + "email": "student.unimelb.edu.au;monash.edu;unimelb.edu.au;unimelb.edu.au", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Melbourne", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unimelb.edu.au", + "aff_unique_abbr": "UniMelb", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" 
+ }, + { + "id": "2022.findings-emnlp.482", + "title": "Forging Multiple Training Objectives for Pre-trained Language Models via Meta-Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multiple pre-training objectives fill the vacancy of the understanding capability of single-objective language modeling, which serves the ultimate purpose of pre-trained language models (PrLMs), generalizing well on a mass of scenarios. However, learning multiple training objectives in a single model is challenging due to the unknown relative significance as well as the potential contrariety between them. Empirical studies have shown that the current objective sampling in an ad-hoc manual setting makes the learned language representation barely converge to the desired optimum. Thus, we propose MOMETAS, a novel adaptive sampler based on meta-learning, which learns the latent sampling pattern on arbitrary pre-training objectives. Such a design is lightweight with negligible additional training overhead. 
To validate our approach, we adopt five objectives and conduct continual pre-training with BERT-base and BERT-large models, where MOMETAS demonstrates universal performance gain over other rule-based sampling strategies on 14 natural language processing tasks.", + "author": "Hongqiu Wu; Ruixue Ding; Hai Zhao; Boli Chen; Pengjun Xie; Fei Huang; Min Zhang", + "authorids": "/h/hongqiu-wu/; /r/ruixue-ding/; /h/hai-zhao/; /b/boli-chen/; /p/pengjun-xie/; /f/fei-huang/; /m/min-zhang/", + "bibtex": "@inproceedings{wu-etal-2022-forging,\n title = \"Forging Multiple Training Objectives for Pre-trained Language Models via Meta-Learning\",\n author = \"Wu, Hongqiu and\n Ding, Ruixue and\n Zhao, Hai and\n Chen, Boli and\n Xie, Pengjun and\n Huang, Fei and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.482/\",\n doi = \"10.18653/v1/2022.findings-emnlp.482\",\n pages = \"6454--6466\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.482.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.482/", + "pdf_size": 629818, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17343432319840849448&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University+Damo Academy, Alibaba Group; Damo Academy, Alibaba Group; Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Damo Academy, Alibaba Group; Damo Academy, Alibaba Group; Damo Academy, Alibaba Group; School of Computer 
Science and Technology, Soochow University", + "aff_domain": "sjtu.edu.cn;alibaba-inc.com;cs.sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + "email": "sjtu.edu.cn;alibaba-inc.com;cs.sjtu.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + "github": "https://github.com/gingasan/mometas", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;0+0;1;1;1;2", + "aff_unique_norm": "Shanghai Jiao Tong University;Alibaba Group;Soochow University", + "aff_unique_dep": "Department of Computer Science and Engineering;Damo Academy;School of Computer Science and Technology", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.alibaba-group.com;https://eng.suda.edu.cn/", + "aff_unique_abbr": "SJTU;Alibaba;Soochow U", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.557", + "title": "FormLM: Recommending Creation Ideas for Online Forms by Modelling Semantic and Structural Information", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Online forms are widely used to collect data from human and have a multi-billion market. Many software products provide online services for creating semi-structured forms where questions and descriptions are organized by predefined structures. However, the design and creation process of forms is still tedious and requires expert knowledge. To assist form designers, in this work we present FormLM to model online forms (by enhancing pre-trained language model with form structural information) and recommend form creation ideas (including question / options recommendations and block type suggestion). For model training and evaluation, we collect the first public online form dataset with 62K online forms. 
Experiment results show that FormLM significantly outperforms general-purpose language models on all tasks, with an improvement by 4.71 on Question Recommendation and 10.6 on Block Type Suggestion in terms of ROUGE-1 and Macro-F1, respectively.", + "author": "Yijia Shao; Mengyu Zhou; Yifan Zhong; Tao Wu; Hongwei Han; Shi Han; Gideon Huang; Dongmei Zhang", + "authorids": "/y/yijia-shao/; /m/mengyu-zhou/; /y/yifan-zhong/; /t/tao-wu/; /h/hongwei-han/; /s/shi-han/; /g/gideon-huang/; /d/dongmei-zhang/", + "bibtex": "@inproceedings{shao-etal-2022-formlm,\n title = \"{F}orm{LM}: Recommending Creation Ideas for Online Forms by Modelling Semantic and Structural Information\",\n author = \"Shao, Yijia and\n Zhou, Mengyu and\n Zhong, Yifan and\n Wu, Tao and\n Han, Hongwei and\n Han, Shi and\n Huang, Gideon and\n Zhang, Dongmei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.557/\",\n doi = \"10.18653/v1/2022.emnlp-main.557\",\n pages = \"8133--8149\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.557.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.557/", + "pdf_size": 1235186, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3516444718393127164&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Peking University; Microsoft Research; Fudan University; Microsoft; Tsinghua University; Microsoft Research; Microsoft; Microsoft Research", + "aff_domain": "pku.edu.cn;microsoft.com;fudan.edu.cn;microsoft.com;mails.tsinghua.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "email": 
"pku.edu.cn;microsoft.com;fudan.edu.cn;microsoft.com;mails.tsinghua.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;1;3;1;1;1", + "aff_unique_norm": "Peking University;Microsoft Corporation;Fudan University;Tsinghua University", + "aff_unique_dep": ";Microsoft Research;;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.microsoft.com/en-us/research;https://www.fudan.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "Peking U;MSR;Fudan;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1;0;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.232", + "title": "Formulating Few-shot Fine-tuning Towards Language Model Pre-training: A Pilot Study on Named Entity Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Fine-tuning pre-trained language models is a common practice in building NLP models for various tasks, including the case with less supervision. We argue that under the few-shot setting, formulating fine-tuning closer to the pre-training objective shall be able to unleash more benefits from the pre-trained language models. In this work, we take few-shot named entity recognition (NER) for a pilot study, where existing fine-tuning strategies are much different from pre-training. We propose a novel few-shot fine-tuning framework for NER, FFF-NER. Specifically, we introduce three new types of tokens, \u201cis-entity\u201d, \u201cwhich-type\u201d and \u201cbracket\u201d, so we can formulate the NER fine-tuning as (masked) token prediction or generation, depending on the choice of the pre-training objective. 
In our experiments, we apply to fine-tune both BERT and BART for few-shot NER on several benchmark datasets and observe significant improvements over existing fine-tuning strategies, including sequence labeling, prototype meta-learning, and prompt-based approaches. We further perform a series of ablation studies, showing few-shot NER performance is strongly correlated with the similarity between fine-tuning and pre-training.", + "author": "Zihan Wang; Kewen Zhao; Zilong Wang; Jingbo Shang", + "authorids": "/z/zihan-wang/; /k/kewen-zhao/; /z/zilong-wang/; /j/jingbo-shang/", + "bibtex": "@inproceedings{wang-etal-2022-formulating,\n title = \"Formulating Few-shot Fine-tuning Towards Language Model Pre-training: A Pilot Study on Named Entity Recognition\",\n author = \"Wang, Zihan and\n Zhao, Kewen and\n Wang, Zilong and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.232/\",\n doi = \"10.18653/v1/2022.findings-emnlp.232\",\n pages = \"3186--3199\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.232.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.232/", + "pdf_size": 722330, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8002492245869284477&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego; University of California, San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of 
California, San Diego", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.477", + "title": "From Mimicking to Integrating: Knowledge Integration for Pre-Trained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Investigating better ways to reuse the released pre-trained language models (PLMs) can significantly reduce the computational cost and the potential environmental side-effects. This paper explores a novel PLM reuse paradigm, Knowledge Integration (KI). Without human annotations available, KI aims to merge the knowledge from different teacher-PLMs, each of which specializes in a different classification problem, into a versatile student model. To achieve this, we first derive the correlation between virtual golden supervision and teacher predictions. We then design a Model Uncertainty\u2013aware Knowledge Integration (MUKI) framework to recover the golden supervision for the student. Specifically, MUKI adopts Monte-Carlo Dropout to estimate model uncertainty for the supervision integration. An instance-wise re-weighting mechanism based on the margin of uncertainty scores is further incorporated, to deal with the potential conflicting supervision from teachers. Experimental results demonstrate that MUKI achieves substantial improvements over baselines on benchmark datasets. 
Further analysis shows that MUKI can generalize well for merging teacher models with heterogeneous architectures, and even teachers major in cross-lingual datasets.", + "author": "Lei Li; Yankai Lin; Xuancheng Ren; Guangxiang Zhao; Peng Li; Jie Zhou; Xu Sun", + "authorids": "/l/lei-li/; /y/yankai-lin/; /x/xuancheng-ren/; /g/guangxiang-zhao/; /p/peng-li/; /j/jie-zhou/; /x/xu-sun/", + "bibtex": "@inproceedings{li-etal-2022-mimicking,\n title = \"From Mimicking to Integrating: Knowledge Integration for Pre-Trained Language Models\",\n author = \"Li, Lei and\n Lin, Yankai and\n Ren, Xuancheng and\n Zhao, Guangxiang and\n Li, Peng and\n Zhou, Jie and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.477/\",\n doi = \"10.18653/v1/2022.findings-emnlp.477\",\n pages = \"6391--6402\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.477.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.477/", + "pdf_size": 578362, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12227256764400375127&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China+Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University; Institute for AI Industry Research (AIR), Tsinghua University, China; Pattern Recognition Center, WeChat AI, Tencent Inc., 
China; MOE Key Lab of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "stu.pku.edu.cn; ; ; ; ; ;pku.edu.cn", + "email": "stu.pku.edu.cn; ; ; ; ; ;pku.edu.cn", + "github": "https://github.com/lancopku/MUKI", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1+2;0;0;3;4;0", + "aff_unique_norm": "Peking University;Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods;Tsinghua University;Tencent Inc.", + "aff_unique_dep": "School of Computer Science;Gaoling School of Artificial Intelligence;Big Data Management and Analysis;Institute for AI Industry Research (AIR);Pattern Recognition Center, WeChat AI", + "aff_unique_url": "http://www.pku.edu.cn;http://www.ruc.edu.cn;;https://www.tsinghua.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "PKU;RUC;;Tsinghua;Tencent", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.63", + "title": "From Spelling to Grammar: A New Framework for Chinese Grammatical Error Correction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Chinese Grammatical Error Correction (CGEC) aims to generate a correct sentence from an erroneous sequence, where different kinds of errors are mixed. This paper divides the CGEC task into two steps, namely spelling error correction and grammatical error correction. We firstly propose a novel zero-shot approach for spelling error correction, which is simple but effective, obtaining a high precision to avoid error accumulation of the pipeline structure. To handle grammatical error correction, we design part-of-speech (POS) features and semantic class features to enhance the neural network model, and propose an auxiliary task to predict the POS sequence of the target sentence. 
Our proposed framework achieves a 42.11 F-0.5 score on CGEC dataset without using any synthetic data or data augmentation methods, which outperforms the previous state-of-the-art by a wide margin of 1.30 points. Moreover, our model produces meaningful POS representations that capture different POS words and convey reasonable POS transition rules.", + "author": "Xiuyu Wu; Yunfang Wu", + "authorids": "/x/xiuyu-wu/; /y/yunfang-wu/", + "bibtex": "@inproceedings{wu-wu-2022-spelling,\n title = \"From Spelling to Grammar: A New Framework for {C}hinese Grammatical Error Correction\",\n author = \"Wu, Xiuyu and\n Wu, Yunfang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.63/\",\n doi = \"10.18653/v1/2022.findings-emnlp.63\",\n pages = \"889--902\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.63.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.63/", + "pdf_size": 770972, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11976197920188648335&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, Beijing, China+School of Software and Microelectronics, Peking University, Beijing, China; MOE Key Laboratory of Computational Linguistics, Peking University, Beijing, China+School of Computer Science, Peking University, Beijing, China", + "aff_domain": "pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics", + "aff_unique_url": 
"http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.54", + "title": "Full-Stack Information Extraction System for Cybersecurity Intelligence", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Due to rapidly growing cyber-attacks and security vulnerabilities, many reports on cyber-threat intelligence (CTI) are being published daily. While these reports can help security analysts to understand on-going cyber threats, the overwhelming amount of information makes it difficult to digest the information in a timely manner. This paper presents, SecIE, an industrial-strength full-stack information extraction (IE) system for the security domain. SecIE can extract a large number of security entities, relations and the temporal information of the relations, which is critical for cyberthreat investigations. Our evaluation with 133 labeled threat reports containing 108,021 tokens shows that SecIE achieves over 92% F1-score for entity extraction and about 70% F1-score for relation extraction. 
We also showcase how SecIE can be used for downstream security applications.", + "author": "Youngja Park; Taesung Lee", + "authorids": "/y/youngja-park/; /t/taesung-lee/", + "bibtex": "@inproceedings{park-lee-2022-full,\n title = \"Full-Stack Information Extraction System for Cybersecurity Intelligence\",\n author = \"Park, Youngja and\n Lee, Taesung\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.54/\",\n doi = \"10.18653/v1/2022.emnlp-industry.54\",\n pages = \"531--539\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.54.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.54/", + "pdf_size": 2130203, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6555936650577295415&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "https://www.cyberdefensemagazine.com/teamxrat-spreads-ransomware-via-rdp-brute-force-attacks/", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.441", + "title": "G-MAP: General Memory-Augmented Pre-trained Language Model for Domain Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "General pre-trained language models (PLMs), such as BERT, have achieved remarkable performance on various NLP tasks. Recently, domain-specific PLMs have been proposed to boost the task performance of specific domains (e.g., biomedical and computer science) by continuing to pre-train general PLMs with domain-specific corpora. 
However, this domain-adaptive pre-training (DAPT (CITATION)) tends to forget the previous general knowledge acquired by general PLMs, which leads to a catastrophic forgetting phenomenon and sub-optimal performance. To alleviate this problem, we propose a new framework of Memory-Augmented Pre-trained Language Model (MAP), which augments the domain-specific PLM by a memory built from the frozen general PLM without losing the general knowledge. Specifically, we propose a new memory-augmented layer, and based on it, different augmentation strategies are explored to build memory and fusion memory into domain-specific PLM. We demonstrate the effectiveness of MAP on different domains (biomedical and computer science publications, news, and reviews) and different kinds (text classification, QA, NER) of tasks, and the extensive results show that the proposed MAP can achieve SOTA results on these tasks.", + "author": "Zhongwei Wan; Yichun Yin; Wei Zhang; Jiaxin Shi; Lifeng Shang; Guangyong Chen; Xin Jiang; Qun Liu", + "authorids": "/z/zhongwei-wan/; /y/yichun-yin/; /w/wei-zhang/; /j/jiaxin-shi/; /l/lifeng-shang/; /g/guangyong-chen/; /x/xin-jiang/; /q/qun-liu/", + "bibtex": "@inproceedings{wan-etal-2022-g,\n title = \"{G}-{MAP}: General Memory-Augmented Pre-trained Language Model for Domain Tasks\",\n author = \"Wan, Zhongwei and\n Yin, Yichun and\n Zhang, Wei and\n Shi, Jiaxin and\n Shang, Lifeng and\n Chen, Guangyong and\n Jiang, Xin and\n Liu, Qun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.441/\",\n doi = \"10.18653/v1/2022.emnlp-main.441\",\n pages = \"6585--6597\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.441.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.441/", + "pdf_size": 816650, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13710563970120876107&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology, Shenzhen Institute of Advanced Technology, Chinese Academy of Science+University of Chinese Academy of Sciences; Huawei Noah\u2019s Ark Lab; Huawei Cloud Computing; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Zhejiang Lab+Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "aff_domain": "siat.ac.cn;huawei.com;huawei.com;huawei.com;huawei.com;zhejianglab.com;huawei.com;huawei.com", + "email": "siat.ac.cn;huawei.com;huawei.com;huawei.com;huawei.com;zhejianglab.com;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;2;2;2;3+2;2;2", + "aff_unique_norm": "Chinese Academy of Science;University of Chinese Academy of Sciences;Huawei;Zhejiang Lab", + "aff_unique_dep": "Guangdong Provincial Key Laboratory of Computer Vision and Virtual Reality Technology;;Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.huawei.com;http://www.zhejianglab.com", + "aff_unique_abbr": "CAS;UCAS;Huawei;", + "aff_campus_unique_index": "0;", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.430", + "title": "G3: Geolocation via Guidebook Grounding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We demonstrate how language can improve geolocation: the task of predicting the location where an image was taken. Here we study explicit knowledge from human-written guidebooks that describe the salient and class-discriminative visual features humans use for geolocation. 
We propose the task of Geolocation via Guidebook Grounding that uses a dataset of StreetView images from a diverse set of locations and an associated textual guidebook for GeoGuessr, a popular interactive geolocation game. Our approach predicts a country for each image by attending over the clues automatically extracted from the guidebook. Supervising attention with country-level pseudo labels achieves the best performance. Our approach substantially outperforms a state-of-the-art image-only geolocation method, with an improvement of over 5% in Top-1 accuracy. Our dataset and code can be found at https://github.com/g-luo/geolocation_via_guidebook_grounding.", + "author": "Grace Luo; Giscard Biamby; Trevor Darrell; Daniel Fried; Anna Rohrbach", + "authorids": "/g/grace-luo/; /g/giscard-biamby/; /t/trevor-darrell/; /d/daniel-fried/; /a/anna-rohrbach/", + "bibtex": "@inproceedings{luo-etal-2022-g3,\n title = \"G3: Geolocation via Guidebook Grounding\",\n author = \"Luo, Grace and\n Biamby, Giscard and\n Darrell, Trevor and\n Fried, Daniel and\n Rohrbach, Anna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.430/\",\n doi = \"10.18653/v1/2022.findings-emnlp.430\",\n pages = \"5841--5853\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.430.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.430/", + "pdf_size": 7729717, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3408969462622263417&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of California, Berkeley\u2020; University of California, Berkeley\u2020; University of California, Berkeley\u2020; Carnegie Mellon 
University\u2021; University of California, Berkeley\u2020", + "aff_domain": "berkeley.edu;berkeley.edu;berkeley.edu;cs.cmu.edu;berkeley.edu", + "email": "berkeley.edu;berkeley.edu;berkeley.edu;cs.cmu.edu;berkeley.edu", + "github": "https://github.com/g-luo/geolocation_via_guidebook_grounding", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "University of California, Berkeley;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.berkeley.edu;https://www.cmu.edu", + "aff_unique_abbr": "UC Berkeley;CMU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Berkeley;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.257", + "title": "GA-SAM: Gradient-Strength based Adaptive Sharpness-Aware Minimization for Improved Generalization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, Sharpness-Aware Minimization (SAM) algorithm has shown state-of-the-art generalization abilities in vision tasks. It demonstrates that flat minima tend to imply better generalization abilities. However, it has some difficulty implying SAM to some natural language tasks, especially to models with drastic gradient changes, such as RNNs. In this work, we analyze the relation between the flatness of the local minimum and its generalization ability from a novel and straightforward theoretical perspective. We propose that the shift of the training and test distributions can be equivalently seen as a virtual parameter corruption or perturbation, which can explain why flat minima that are robust against parameter corruptions or perturbations have better generalization performances. On its basis, we propose a Gradient-Strength based Adaptive Sharpness-Aware Minimization (GA-SAM) algorithm to help to learn algorithms find flat minima that generalize better. 
Results in various language benchmarks validate the effectiveness of the proposed GA-SAM algorithm on natural language tasks.", + "author": "Zhiyuan Zhang; Ruixuan Luo; Qi Su; Xu Sun", + "authorids": "/z/zhiyuan-zhang/; /r/ruixuan-luo/; /q/qi-su/; /x/xu-sun/", + "bibtex": "@inproceedings{zhang-etal-2022-ga,\n title = \"{GA}-{SAM}: Gradient-Strength based Adaptive Sharpness-Aware Minimization for Improved Generalization\",\n author = \"Zhang, Zhiyuan and\n Luo, Ruixuan and\n Su, Qi and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.257/\",\n doi = \"10.18653/v1/2022.emnlp-main.257\",\n pages = \"3888--3903\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.257.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.257/", + "pdf_size": 457905, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff": "MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University+1; Center for Data Science, Peking University; School of Foreign Languages, Peking University+1; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Peking University;", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China;" + }, + 
{ + "id": "2022.emnlp-main.787", + "title": "GENIE: Toward Reproducible and Standardized Human Evaluation for Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While often assumed a gold standard, effective human evaluation of text generation remains an important, open area for research.We revisit this problem with a focus on producing consistent evaluations that are reproducible\u2014over time and across different populations. We study this goal in different stages of the human evaluation pipeline. In particular, we consider design choices for the annotation interface used to elicit human judgments and their impact on reproducibility. Furthermore, we develop an automated mechanism for maintaining annotator quality via a probabilistic model that detects and excludes noisy annotators. Putting these lessons together, we introduce GENIE: a system for running standardized human evaluations across different generation tasks.We instantiate GENIE with datasets representing four core challenges in text generation: machine translation, summarization, commonsense reasoning, and machine comprehension.For each task, GENIE offers a leaderboard that automatically crowdsources annotations for submissions, evaluating them along axes such as correctness, conciseness, and fluency.We have made the GENIE leaderboards publicly available, and have already ranked 50 submissions from 10 different research groups. We hope GENIE encourages further progress toward effective, standardized evaluations for text generation.", + "author": "Daniel Khashabi; Gabriel Stanovsky; Jonathan Bragg; Nicholas Lourie; Jungo Kasai; Yejin Choi; Noah A. 
Smith; Daniel Weld", + "authorids": "/d/daniel-khashabi/; /g/gabriel-stanovsky/; /j/jonathan-bragg/; /n/nicholas-lourie/; /j/jungo-kasai/; /y/yejin-choi/; /n/noah-a-smith/; /d/daniel-s-weld/", + "bibtex": "@inproceedings{khashabi-etal-2022-genie,\n title = \"{GENIE}: Toward Reproducible and Standardized Human Evaluation for Text Generation\",\n author = \"Khashabi, Daniel and\n Stanovsky, Gabriel and\n Bragg, Jonathan and\n Lourie, Nicholas and\n Kasai, Jungo and\n Choi, Yejin and\n Smith, Noah A. and\n Weld, Daniel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.787/\",\n doi = \"10.18653/v1/2022.emnlp-main.787\",\n pages = \"11444--11458\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.787.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.787/", + "pdf_size": 1488912, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=407691519402069044&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "https://genie.apps.allenai.org", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.374", + "title": "GHAN: Graph-Based Hierarchical Aggregation Network for Text-Video Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text-video retrieval focuses on two aspects: cross-modality interaction and video-language encoding. Currently, the mainstream approach is to train a joint embedding space for multimodal interactions. However, there are structural and semantic differences between text and video, making this approach challenging for fine-grained understanding. 
In order to solve this, we propose an end-to-end graph-based hierarchical aggregation network for text-video retrieval according to the hierarchy possessed by text and video. We design a token-level weighted network to refine intra-modality representations and construct a graph-based message passing attention network for global-local alignment across modality. We conduct experiments on the public datasets MSR-VTT-9K, MSR-VTT-7K and MSVD, and achieve Recall@1 of 73.0%, 65.6%, and 64.0% , which is 25.7%, 16.5%, and 14.2% better than the current state-of-the-art model.", + "author": "Yahan Yu; Bojie Hu; Yu Li", + "authorids": "/y/yahan-yu/; /b/bojie-hu/; /y/yu-li/", + "bibtex": "@inproceedings{yu-etal-2022-ghan,\n title = \"{GHAN}: Graph-Based Hierarchical Aggregation Network for Text-Video Retrieval\",\n author = \"Yu, Yahan and\n Hu, Bojie and\n Li, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.374/\",\n doi = \"10.18653/v1/2022.emnlp-main.374\",\n pages = \"5547--5557\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.374.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.374/", + "pdf_size": 1165337, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2967554044593865058&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Tencent Minority-Mandarin Translation, Beijing, China; Tencent Minority-Mandarin Translation, Beijing, China; Tencent Minority-Mandarin Translation, Beijing, China + Beijing Key Lab of Traffic Data Analysis and Mining, Beijing Jiaotong University, China", + "aff_domain": "gmail.com;tencent.com;bjtu.com", + "email": "gmail.com;tencent.com;bjtu.com", + 
"github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Tencent;Beijing Jiaotong University", + "aff_unique_dep": "Minority-Mandarin Translation;Beijing Key Lab of Traffic Data Analysis and Mining", + "aff_unique_url": "https://www.tencent.com;http://www.bjtu.edu.cn", + "aff_unique_abbr": "Tencent;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.39", + "title": "GNN-encoder: Learning a Dual-encoder Architecture via Graph Neural Networks for Dense Passage Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, retrieval models based on dense representations are dominant in passage retrieval tasks, due to their outstanding ability in terms of capturing semantics of input text compared to the traditional sparse vector space models. A common practice of dense retrieval models is to exploit a dual-encoder architecture to represent a query and a passage independently. Though efficient, such a structure loses interaction between the query-passage pair, resulting in inferior accuracy. To enhance the performance of dense retrieval models without loss of efficiency, we propose a GNN-encoder model in which query (passage) information is fused into passage (query) representations via graph neural networks that are constructed by queries and their top retrieved passages. By this means, we maintain a dual-encoder structure, and retain some interaction information between query-passage pairs in their representations, which enables us to achieve both efficiency and efficacy in passage retrieval. 
Evaluation results indicate that our method significantly outperforms the existing models on MSMARCO, Natural Questions and TriviaQA datasets, and achieves the new state-of-the-art on these datasets.", + "author": "Jiduan Liu; Jiahao Liu; Yang Yang; Jingang Wang; Wei Wu; Dongyan Zhao; Rui Yan", + "authorids": "/j/jiduan-liu/; /j/jiahao-liu/; /y/yang-yang/; /j/jingang-wang/; /w/wei-wu/; /d/dongyan-zhao/; /r/rui-yan/", + "bibtex": "@inproceedings{liu-etal-2022-gnn,\n title = \"{GNN}-encoder: Learning a Dual-encoder Architecture via Graph Neural Networks for Dense Passage Retrieval\",\n author = \"Liu, Jiduan and\n Liu, Jiahao and\n Yang, Yang and\n Wang, Jingang and\n Wu, Wei and\n Zhao, Dongyan and\n Yan, Rui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.39/\",\n doi = \"10.18653/v1/2022.findings-emnlp.39\",\n pages = \"564--575\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.39.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.39/", + "pdf_size": 931785, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8934377837041910170&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.559", + "title": "GPS: Genetic Prompt Search for Efficient Few-Shot Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt-based techniques have demostrated great potential for improving the few-shot generalization of pretrained language models. 
However, their performance heavily relies on the manual design of prompts and thus requiring a lot of human efforts. In this paper, we introduce Genetic Prompt Search (GPS) to improve few-shot learning with prompts, which utilizes a genetic algorithm to automatically search for the best prompt.GPS is gradient-free and requires no update of model parameters but only a small validation set. Experiments on diverse datasets proved the effectiveness of GPS, which outperforms manual prompts by a large margin of 2.6 points. Our method is also better than other parameter-efficient tuning methods such as prompt tuning.", + "author": "Hanwei Xu; Yujun Chen; Yulun Du; Nan Shao; Wang Yanggang; Haiyu Li; Zhilin Yang", + "authorids": "/h/hanwei-xu/; /y/yujun-chen/; /y/yulun-du/; /n/nan-shao/; /w/wang-yanggang/; /h/haiyu-li/; /z/zhilin-yang/", + "bibtex": "@inproceedings{xu-etal-2022-gps,\n title = \"{GPS}: Genetic Prompt Search for Efficient Few-Shot Learning\",\n author = \"Xu, Hanwei and\n Chen, Yujun and\n Du, Yulun and\n Shao, Nan and\n Yanggang, Wang and\n Li, Haiyu and\n Yang, Zhilin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.559/\",\n doi = \"10.18653/v1/2022.emnlp-main.559\",\n pages = \"8162--8171\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.559.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.559/", + "pdf_size": 360752, + "gs_citation": 69, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6553540641527951681&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI", + "aff_domain": 
"rcrai.com;rcrai.com;rcrai.com;rcrai.com; ; ; ", + "email": "rcrai.com;rcrai.com;rcrai.com;rcrai.com; ; ; ", + "github": "https://github.com/hwxu20/GPS", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Recurrent AI", + "aff_unique_dep": "", + "aff_unique_url": "https://www.recurrent.ai", + "aff_unique_abbr": "Recurrent AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.506", + "title": "GREENER: Graph Neural Networks for News Media Profiling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study the problem of profiling news media on the Web with respect to their factuality of reporting and bias. This is an important but under-studied problem related to disinformation and \u201cfake news\u201d detection, but it addresses the issue at a coarser granularity compared to looking at an individual article or an individual claim. This is useful as it allows to profile entire media outlets in advance. Unlike previous work, which has focused primarily on text (e.g., on the text of the articles published by the target website, or on the textual description in their social media profiles or in Wikipedia), here our main focus is on modeling the similarity between media outlets based on the overlap of their audience. This is motivated by homophily considerations, i.e., the tendency of people to have connections to people with similar interests, which we extend to media, hypothesizing that similar types of media would be read by similar kinds of users. In particular, we propose GREENER (GRaph nEural nEtwork for News mEdia pRofiling), a model that builds a graph of inter-media connections based on their audience overlap, and then uses graph neural networks to represent each medium. 
We find that such representations are quite useful for predicting the factuality and the bias of news media outlets, yielding improvements over state-of-the-art results reported on two datasets. When augmented with conventionally used representations obtained from news articles, Twitter, YouTube, Facebook, and Wikipedia, prediction accuracy is found to improve by 2.5-27 macro-F1 points for the two tasks.", + "author": "Panayot Panayotov; Utsav Shukla; Husrev Taha Sencar; Mohamed Nabeel; Preslav Nakov", + "authorids": "/p/panayot-panayotov/; /u/utsav-shukla/; /h/husrev-taha-sencar/; /m/mohamed-nabeel/; /p/preslav-nakov/", + "bibtex": "@inproceedings{panayotov-etal-2022-greener,\n title = \"{GREENER}: Graph Neural Networks for News Media Profiling\",\n author = \"Panayotov, Panayot and\n Shukla, Utsav and\n Sencar, Husrev Taha and\n Nabeel, Mohamed and\n Nakov, Preslav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.506/\",\n doi = \"10.18653/v1/2022.emnlp-main.506\",\n pages = \"7470--7480\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.506.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.506/", + "pdf_size": 741541, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7340163506444930098&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Sofia University; TIET; QCRI, HBKU; QCRI, HBKU; Mohamed bin Zayed University of Artificial Intelligence", + "aff_domain": "gmail.com; ;hbku.edu.qa;hbku.edu.qa;mbzuai.ac.ae", + "email": "gmail.com; ;hbku.edu.qa;hbku.edu.qa;mbzuai.ac.ae", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;3", + "aff_unique_norm": 
"Sofia University;Thapar Institute of Engineering and Technology;Qatar Computing Research Institute;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.sofiauni.bg/en/;https://www.tiet.ac.in;https://www.qcri.org;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "Sofia U;TIET;QCRI;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;2;3", + "aff_country_unique": "Bulgaria;India;Qatar;United Arab Emirates" + }, + { + "id": "2022.emnlp-industry.22", + "title": "Gaining Insights into Unrecognized User Utterances in Task-Oriented Dialog Systems", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "The rapidly growing market demand for automatic dialogue agents capable of goal-oriented behavior has caused many tech-industry leaders to invest considerable efforts into task-oriented dialog systems. The success of these systems is highly dependent on the accuracy of their intent identification \u2013 the process of deducing the goal or meaning of the user\u2019s request and mapping it to one of the known intents for further processing. Gaining insights into unrecognized utterances \u2013 user requests the systems fails to attribute to a known intent \u2013 is therefore a key process in continuous improvement of goal-oriented dialog systems. We present an end-to-end pipeline for processing unrecognized user utterances, deployed in a real-world, commercial task-oriented dialog system, including a specifically-tailored clustering algorithm, a novel approach to cluster representative extraction, and cluster naming. 
We evaluated the proposed components, demonstrating their benefits in the analysis of unrecognized user requests.", + "author": "Ella Rabinovich; Matan Vetzler; David Boaz; Vineet Kumar; Gaurav Pandey; Ateret Anaby Tavor", + "authorids": "/e/ella-rabinovich/; /m/matan-vetzler/; /d/david-boaz/; /v/vineet-kumar/; /g/gaurav-pandey/; /a/ateret-anaby-tavor/", + "bibtex": "@inproceedings{rabinovich-etal-2022-gaining,\n title = \"Gaining Insights into Unrecognized User Utterances in Task-Oriented Dialog Systems\",\n author = \"Rabinovich, Ella and\n Vetzler, Matan and\n Boaz, David and\n Kumar, Vineet and\n Pandey, Gaurav and\n Anaby Tavor, Ateret\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.22/\",\n doi = \"10.18653/v1/2022.emnlp-industry.22\",\n pages = \"218--225\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.22.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.22/", + "pdf_size": 1186111, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11993502898937426481&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research", + "aff_domain": "ibm.com;ibm.com;il.ibm.com;in.ibm.com;in.ibm.com;il.ibm.com", + "email": "ibm.com;ibm.com;il.ibm.com;in.ibm.com;in.ibm.com;il.ibm.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.47", + "title": "GammaE: Gamma Embeddings for Logical Queries on Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Embedding knowledge graphs (KGs) for multi-hop logical reasoning is a challenging problem due to massive and complicated structures in many KGs. Recently, many promising works projected entities and queries into a geometric space to efficiently find answers. However, it remains challenging to model the negation and union operator. The negation operator has no strict boundaries, which generates overlapped embeddings and leads to obtaining ambiguous answers. An additional limitation is that the union operator is non-closure, which undermines the model to handle a series of union operators. To address these problems, we propose a novel probabilistic embedding model, namely Gamma Embeddings (GammaE), for encoding entities and queries to answer different types of FOL queries on KGs. We utilize the linear property and strong boundary support of the Gamma distribution to capture more features of entities and queries, which dramatically reduces model uncertainty. Furthermore, GammaE implements the Gamma mixture method to design the closed union operator. The performance of GammaE is validated on three large logical query datasets. 
Experimental results show that GammaE significantly outperforms state-of-the-art models on public benchmarks.", + "author": "Dong Yang; Peijun Qing; Yang Li; Haonan Lu; Xiaodong Lin", + "authorids": "/d/dong-yang/; /p/peijun-qing/; /y/yang-li/; /h/haonan-lu/; /x/xiaodong-lin/", + "bibtex": "@inproceedings{yang-etal-2022-gammae,\n title = \"{G}amma{E}: Gamma Embeddings for Logical Queries on Knowledge Graphs\",\n author = \"Yang, Dong and\n Qing, Peijun and\n Li, Yang and\n Lu, Haonan and\n Lin, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.47/\",\n doi = \"10.18653/v1/2022.emnlp-main.47\",\n pages = \"745--760\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.47.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.47/", + "pdf_size": 709601, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9776386119226043737&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "OPPO Research Institute, Shenzhen, China; Xidian University, Xi\u2019an, China; The Hong Kong Polytechnic University, Hong Kong, China; OPPO Research Institute, Shenzhen, China; Rutgers University, USA", + "aff_domain": "oppo.com;oppo.com;gmail.com;connect.polyu.hk;business.rutgers.edu", + "email": "oppo.com;oppo.com;gmail.com;connect.polyu.hk;business.rutgers.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;3", + "aff_unique_norm": "OPPO Research Institute;Xidian University;The Hong Kong Polytechnic University;Rutgers University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.oppo.com;http://www.xidian.edu.cn;https://www.polyu.edu.hk;https://www.rutgers.edu", 
+ "aff_unique_abbr": "OPPO RI;Xidian;PolyU;Rutgers", + "aff_campus_unique_index": "0;1;2;0", + "aff_campus_unique": "Shenzhen;Xi'an;Hong Kong;", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.227", + "title": "Gender Bias in Meta-Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Different methods have been proposed to develop meta-embeddings from a given set of source embeddings. However, the source embeddings can contain unfair gender-related biases, and how these influence the meta-embeddings has not been studied yet.We study the gender bias in meta-embeddings created under three different settings:(1) meta-embedding multiple sources without performing any debiasing (Multi-Source No-Debiasing),(2) meta-embedding multiple sources debiased by a single method (Multi-Source Single-Debiasing), and(3) meta-embedding a single source debiased by different methods (Single-Source Multi-Debiasing).Our experimental results show that meta-embedding amplifies the gender biases compared to input source embeddings.We find that debiasing not only the sources but also their meta-embedding is needed to mitigate those biases.Moreover, we propose a novel debiasing method based on meta-embedding learning where we use multiple debiasing methods on a single source embedding and then create a single unbiased meta-embedding.", + "author": "Masahiro Kaneko; Danushka Bollegala; Naoaki Okazaki", + "authorids": "/m/masahiro-kaneko/; /d/danushka-bollegala/; /n/naoaki-okazaki/", + "bibtex": "@inproceedings{kaneko-etal-2022-gender-bias,\n title = \"Gender Bias in Meta-Embeddings\",\n author = \"Kaneko, Masahiro and\n Bollegala, Danushka and\n Okazaki, Naoaki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, 
United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.227/\",\n doi = \"10.18653/v1/2022.findings-emnlp.227\",\n pages = \"3118--3133\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.227.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.227/", + "pdf_size": 473582, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11335164865522863669&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Tokyo Institute of Technology; University of Liverpool+Amazon; Tokyo Institute of Technology", + "aff_domain": "nlp.c.titech.ac.jp;liverpool.ac.uk;c.titech.ac.jp", + "email": "nlp.c.titech.ac.jp;liverpool.ac.uk;c.titech.ac.jp", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "Tokyo Institute of Technology;University of Liverpool;Amazon.com, Inc.", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.titech.ac.jp;https://www.liverpool.ac.uk;https://www.amazon.com", + "aff_unique_abbr": "Titech;Liv Uni;Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+2;0", + "aff_country_unique": "Japan;United Kingdom;United States" + }, + { + "id": "2022.emnlp-main.139", + "title": "Gendered Mental Health Stigma in Masked Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Mental health stigma prevents many individuals from receiving the appropriate care, and social psychology studies have shown that mental health tends to be overlooked in men. In this work, we investigate gendered mental health stigma in masked language models. In doing so, we operationalize mental health stigma by developing a framework grounded in psychology research: we use clinical psychology literature to curate prompts, then evaluate the models\u2019 propensity to generate gendered words. 
We find that masked language models capture societal stigma about gender in mental health: models are consistently more likely to predict female subjects than male in sentences about having a mental health condition (32% vs. 19%), and this disparity is exacerbated for sentences that indicate treatment-seeking behavior. Furthermore, we find that different models capture dimensions of stigma differently for men and women, associating stereotypes like anger, blame, and pity more with women with mental health conditions than with men. In showing the complex nuances of models\u2019 gendered mental health stigma, we demonstrate that context and overlapping dimensions of identity are important considerations when assessing computational models\u2019 social biases.", + "author": "Inna Lin; Lucille Njoo; Anjalie Field; Ashish Sharma; Katharina Reinecke; Tim Althoff; Yulia Tsvetkov", + "authorids": "/i/inna-lin/; /l/lucille-njoo/; /a/anjalie-field/; /a/ashish-sharma/; /k/katharina-reinecke/; /t/tim-althoff/; /y/yulia-tsvetkov/", + "bibtex": "@inproceedings{lin-etal-2022-gendered,\n title = \"Gendered Mental Health Stigma in Masked Language Models\",\n author = \"Lin, Inna and\n Njoo, Lucille and\n Field, Anjalie and\n Sharma, Ashish and\n Reinecke, Katharina and\n Althoff, Tim and\n Tsvetkov, Yulia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.139/\",\n doi = \"10.18653/v1/2022.emnlp-main.139\",\n pages = \"2152--2170\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.139.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.139/", + "pdf_size": 779481, + "gs_citation": 9, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3960473282007208982&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington; Stanford University; Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington", + "aff_domain": "cs.washington.edu;cs.washington.edu; ;cs.washington.edu;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "email": "cs.washington.edu;cs.washington.edu; ;cs.washington.edu;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;0", + "aff_unique_norm": "University of Washington;Stanford University", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;", + "aff_unique_url": "https://www.washington.edu;https://www.stanford.edu", + "aff_unique_abbr": "UW;Stanford", + "aff_campus_unique_index": "0;0;1;0;0;0;0", + "aff_campus_unique": "Seattle;Stanford", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.345", + "title": "Generalization Differences between End-to-End and Neuro-Symbolic Vision-Language Reasoning Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "For vision-and-language reasoning tasks, both fully connectionist, end-to-end methods and hybrid, neuro-symbolic methods have achieved high in-distribution performance. In which out-of-distribution settings does each paradigm excel? 
We investigate this question on both single-image and multi-image visual question-answering through four types of generalization tests: a novel segment-combine test for multi-image queries, contrast set, compositional generalization, and cross-benchmark transfer.Vision-and-language end-to-end trained systems exhibit sizeable performance drops across all these tests. Neuro-symbolic methods suffer even more on cross-benchmark transfer from GQA to VQA, but they show smaller accuracy drops on the other generalization tests and their performance quickly improves by few-shot training. Overall, our results demonstrate the complementary benefits of these two paradigms, and emphasize the importance of using a diverse suite of generalization tests to fully characterize model robustness to distribution shift.", + "author": "Wang Zhu; Jesse Thomason; Robin Jia", + "authorids": "/w/wang-zhu/; /j/jesse-thomason/; /r/robin-jia/", + "bibtex": "@inproceedings{zhu-etal-2022-generalization,\n title = \"Generalization Differences between End-to-End and Neuro-Symbolic Vision-Language Reasoning Systems\",\n author = \"Zhu, Wang and\n Thomason, Jesse and\n Jia, Robin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.345/\",\n doi = \"10.18653/v1/2022.findings-emnlp.345\",\n pages = \"4697--4711\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.345.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.345/", + "pdf_size": 1539156, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15886265842552772783&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Southern California, Los Angeles, CA, USA; 
University of Southern California, Los Angeles, CA, USA; University of Southern California, Los Angeles, CA, USA", + "aff_domain": "usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.588", + "title": "Generalizing over Long Tail Concepts for Medical Term Normalization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Medical term normalization consists in mapping a piece of text to a large number of output classes.Given the small size of the annotated datasets and the extremely long tail distribution of the concepts, it is of utmost importance to develop models that are capable to generalize to scarce or unseen concepts.An important attribute of most target ontologies is their hierarchical structure. 
In this paper we introduce a simple and effective learning strategy that leverages such information to enhance the generalizability of both discriminative and generative models.The evaluation shows that the proposed strategy produces state-of-the-art performance on seen concepts and consistent improvements on unseen ones, allowing also for efficient zero-shot knowledge transfer across text typologies and datasets.", + "author": "Beatrice Portelli; Simone Scaboro; Enrico Santus; Hooman Sedghamiz; Emmanuele Chersoni; Giuseppe Serra", + "authorids": "/b/beatrice-portelli/; /s/simone-scaboro/; /e/enrico-santus/; /h/hooman-sedghamiz/; /e/emmanuele-chersoni/; /g/giuseppe-serra/", + "bibtex": "@inproceedings{portelli-etal-2022-generalizing,\n title = \"Generalizing over Long Tail Concepts for Medical Term Normalization\",\n author = \"Portelli, Beatrice and\n Scaboro, Simone and\n Santus, Enrico and\n Sedghamiz, Hooman and\n Chersoni, Emmanuele and\n Serra, Giuseppe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.588/\",\n doi = \"10.18653/v1/2022.emnlp-main.588\",\n pages = \"8580--8591\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.588.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.588/", + "pdf_size": 378230, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11392147380157264169&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.558", + "title": "Generate, Discriminate and Contrast: A Semi-Supervised Sentence Representation Learning 
Framework", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most sentence embedding techniques heavily rely on expensive human-annotated sentence pairs as the supervised signals. Despite the use of large-scale unlabeled data, the performance of unsupervised methods typically lags far behind that of the supervised counterparts in most downstream tasks. In this work, we propose a semi-supervised sentence embedding framework, GenSE, that effectively leverages large-scale unlabeled data. Our method include three parts: 1) Generate: A generator/discriminator model is jointly trained to synthesize sentence pairs from open-domain unlabeled corpus; 2) Discriminate: Noisy sentence pairs are filtered out by the discriminator to acquire high-quality positive and negative sentence pairs; 3) Contrast: A prompt-based contrastive approach is presented for sentence representation learning with both annotated and synthesized data. Comprehensive experiments show that GenSE achieves an average correlation score of 85.19 on the STS datasets and consistent performance improvement on four domain adaptation tasks, significantly surpassing the state-of-the-art methods and convincingly corroborating its effectiveness and generalization ability.", + "author": "Yiming Chen; Yan Zhang; Bin Wang; Zuozhu Liu; Haizhou Li", + "authorids": "/y/yiming-chen/; /y/yan-zhang/; /b/bin-wang/; /z/zuozhu-liu/; /h/haizhou-li/", + "bibtex": "@inproceedings{chen-etal-2022-generate,\n title = \"Generate, Discriminate and Contrast: A Semi-Supervised Sentence Representation Learning Framework\",\n author = \"Chen, Yiming and\n Zhang, Yan and\n Wang, Bin and\n Liu, Zuozhu and\n Li, Haizhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for 
Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.558/\",\n doi = \"10.18653/v1/2022.emnlp-main.558\",\n pages = \"8150--8161\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.558.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.558/", + "pdf_size": 400574, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10643922035208834275&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "National University of Singapore; National University of Singapore; National University of Singapore; Zhejiang University + Angelalign Inc., China; The Chinese University of Hong Kong, Shenzhen, China + Kriston AI Lab, China", + "aff_domain": "u.nus.edu;nus.edu.sg;nus.edu.sg;intl.zju.edu.cn;nus.edu.sg", + "email": "u.nus.edu;nus.edu.sg;nus.edu.sg;intl.zju.edu.cn;nus.edu.sg", + "github": "https://github.com/MatthewCYM/GenSE", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+2;3+4", + "aff_unique_norm": "National University of Singapore;Zhejiang University;Angelalign Inc.;The Chinese University of Hong Kong;Kriston AI Lab", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.zju.edu.cn;;https://www.cuhk.edu.cn;", + "aff_unique_abbr": "NUS;ZJU;;CUHK;", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;1+1;1+1", + "aff_country_unique": "Singapore;China" + }, + { + "id": "2022.emnlp-main.151", + "title": "Generating Information-Seeking Conversations from Unlabeled Documents", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Synthesizing datasets for conversational question answering (CQA) from unlabeled documents remains challenging due to its interactive nature.Moreover, while modeling information needs is an essential key, only few studies have discussed it.In this paper, we introduce a novel framework, **SimSeek**, (**Sim**ulating information-**Seek**ing 
conversation from unlabeled documents), and compare its two variants.In our baseline, **SimSeek-sym**, a questioner generates follow-up questions upon the predetermined answer by an answerer.On the contrary, **SimSeek-asym** first generates the question and then finds its corresponding answer under the conversational context.Our experiments show that they can synthesize effective training resources for CQA and conversational search tasks.As a result, conversations from **SimSeek-asym** not only make more improvements in our experiments but also are favorably reviewed in a human evaluation.We finally release a large-scale resource of synthetic conversations, **Wiki-SimSeek**, containing 2 million CQA pairs built upon Wikipedia documents.With the dataset, our CQA model achieves the state-of-the-art performance on a recent CQA benchmark, QuAC.The code and dataset are available at https://github.com/naver-ai/simseek", + "author": "Gangwoo Kim; Sungdong Kim; Kang Min Yoo; Jaewoo Kang", + "authorids": "/g/gangwoo-kim/; /s/sungdong-kim/; /k/kang-min-yoo/; /j/jaewoo-kang/", + "bibtex": "@inproceedings{kim-etal-2022-generating,\n title = \"Generating Information-Seeking Conversations from Unlabeled Documents\",\n author = \"Kim, Gangwoo and\n Kim, Sungdong and\n Yoo, Kang Min and\n Kang, Jaewoo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.151/\",\n doi = \"10.18653/v1/2022.emnlp-main.151\",\n pages = \"2362--2378\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.151.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.151/", + "pdf_size": 852990, + "gs_citation": 14, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=12554535928088167830&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Korea University1+NAVER AI Lab2; NAVER AI Lab2+KAIST AI3; NAVER AI Lab2+NAVER CLOVA4; Korea University1", + "aff_domain": "korea.ac.kr;navercorp.com;navercorp.com;korea.ac.kr", + "email": "korea.ac.kr;navercorp.com;navercorp.com;korea.ac.kr", + "github": "https://github.com/naver-ai/simseek", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1+2;1;0", + "aff_unique_norm": "Korea University;NAVER AI Lab;KAIST;", + "aff_unique_dep": ";AI Lab;AI3;", + "aff_unique_url": "https://www.korea.ac.kr;https://www.naver.com;https://www.kaist.edu;", + "aff_unique_abbr": "KU;NAVER AI Lab;KAIST;", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "2022.emnlp-main.229", + "title": "Generating Literal and Implied Subquestions to Fact-check Complex Claims", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Verifying political claims is a challenging task, as politicians can use various tactics to subtly misrepresent the facts for their agenda. Existing automatic fact-checking systems fall short here, and their predictions like \u201chalf-true\u201d are not very useful in isolation, since it is unclear which parts of a claim are true and which are not. In this work, we focus on decomposing a complex claim into a comprehensive set of yes-no subquestions whose answers influence the veracity of the claim. We present CLAIMDECOMP, a dataset of decompositions for over 1000 claims. Given a claim and its verification paragraph written by fact-checkers, our trained annotators write subquestions covering both explicit propositions of the original claim and its implicit facets, such as asking about additional political context that changes our view of the claim\u2019s veracity. 
We study whether state-of-the-art models can generate such subquestions, showing that these models generate reasonable questions to ask, but predicting the comprehensive set of subquestions from the original claim without evidence remains challenging. We further show that these subquestions can help identify relevant evidence to fact-check the full claim and derive the veracity through their answers, suggesting that they can be useful pieces of a fact-checking pipeline.", + "author": "Jifan Chen; Aniruddh Sriram; Eunsol Choi; Greg Durrett", + "authorids": "/j/jifan-chen/; /a/aniruddh-sriram/; /e/eunsol-choi/; /g/greg-durrett/", + "bibtex": "@inproceedings{chen-etal-2022-generating,\n title = \"Generating Literal and Implied Subquestions to Fact-check Complex Claims\",\n author = \"Chen, Jifan and\n Sriram, Aniruddh and\n Choi, Eunsol and\n Durrett, Greg\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.229/\",\n doi = \"10.18653/v1/2022.emnlp-main.229\",\n pages = \"3495--3516\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.229.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.229/", + "pdf_size": 1801651, + "gs_citation": 72, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8516762450182622866&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin", + "aff_domain": "cs.utexas.edu; ; ; ", + "email": "cs.utexas.edu; ; ; ", + "github": 
"https://jifan-chen.github.io/ClaimDecomp", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.214", + "title": "Generating Multiple-Length Summaries via Reinforcement Learning for Unsupervised Sentence Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Sentence summarization shortens given texts while maintaining core contents of the texts. Unsupervised approaches have been studied to summarize texts without ground-truth summaries. However, recent unsupervised models are extractive, which remove words from texts and thus they are less flexible than abstractive summarization. In this work, we devise an abstractive model based on reinforcement learning without ground-truth summaries. We formulate the unsupervised summarization based on the Markov decision process with rewards representing the summary quality. To further enhance the summary quality, we develop a multi-summary learning mechanism that generates multiple summaries with varying lengths for a given text, while making the summaries mutually enhance each other. 
Experimental results show that the proposed model substantially outperforms both abstractive and extractive models, yet frequently generating new words not contained in input texts.", + "author": "Dongmin Hyun; Xiting Wang; Chayoung Park; Xing Xie; Hwanjo Yu", + "authorids": "/d/dongmin-hyun/; /x/xiting-wang/; /c/chayoung-park/; /x/xing-xie/; /h/hwanjo-yu/", + "bibtex": "@inproceedings{hyun-etal-2022-generating,\n title = \"Generating Multiple-Length Summaries via Reinforcement Learning for Unsupervised Sentence Summarization\",\n author = \"Hyun, Dongmin and\n Wang, Xiting and\n Park, Chayoung and\n Xie, Xing and\n Yu, Hwanjo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.214/\",\n doi = \"10.18653/v1/2022.findings-emnlp.214\",\n pages = \"2939--2951\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.214.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.214/", + "pdf_size": 889245, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17431497904586478444&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Pohang University of Science and Technology\u2660; Microsoft Research Asia\u2661; Korea Advanced Institute of Science and Technology\u2663; Microsoft Research Asia\u2661; Pohang University of Science and Technology\u2660\u2020", + "aff_domain": "postech.ac.kr;microsoft.com;kaist.ac.kr;microsoft.com;postech.ac.kr", + "email": "postech.ac.kr;microsoft.com;kaist.ac.kr;microsoft.com;postech.ac.kr", + "github": "https://github.com/dmhyun/MSRP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;0", + "aff_unique_norm": "Pohang University of Science and 
Technology;Microsoft Research;Korea Advanced Institute of Science and Technology", + "aff_unique_dep": ";Microsoft Research;", + "aff_unique_url": "https://www.postech.ac.kr;https://www.microsoft.com/en-us/research/group/asia;https://www.kaist.ac.kr", + "aff_unique_abbr": "POSTECH;MSR Asia;KAIST", + "aff_campus_unique_index": "0;1;1", + "aff_campus_unique": "Pohang;Asia;", + "aff_country_unique_index": "0;1;0;1;0", + "aff_country_unique": "South Korea;China" + }, + { + "id": "2022.emnlp-main.7", + "title": "Generating Natural Language Proofs with Verifier-Guided Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reasoning over natural language is a challenging problem in NLP. In this work, we focus on proof generation: Given a hypothesis and a set of supporting facts, the model generates a proof tree indicating how to derive the hypothesis from supporting facts. Compared to generating the entire proof in one shot, stepwise generation can better exploit the compositionality and generalize to longer proofs but has achieved limited success on real-world data. Existing stepwise methods struggle to generate proof steps that are both logically valid and relevant to the hypothesis. Instead, they tend to hallucinate invalid steps given the hypothesis. In this paper, we present a novel stepwise method, NLProofS (Natural Language Proof Search), which learns to generate relevant steps conditioning on the hypothesis. At the core of our approach, we train an independent verifier to check the validity of the proof steps to prevent hallucination. Instead of generating steps greedily, we search for proofs maximizing a global proof score judged by the verifier. NLProofS achieves state-of-the-art performance on EntailmentBank and RuleTaker. 
Specifically, it improves the correctness of predicted proofs from 27.7% to 33.3% in the distractor setting of EntailmentBank, demonstrating the effectiveness of NLProofS in generating challenging human-authored proofs.", + "author": "Kaiyu Yang; Jia Deng; Danqi Chen", + "authorids": "/k/kaiyu-yang/; /j/jia-deng/; /d/danqi-chen/", + "bibtex": "@inproceedings{yang-etal-2022-generating,\n title = \"Generating Natural Language Proofs with Verifier-Guided Search\",\n author = \"Yang, Kaiyu and\n Deng, Jia and\n Chen, Danqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.7/\",\n doi = \"10.18653/v1/2022.emnlp-main.7\",\n pages = \"89--105\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.7.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.7/", + "pdf_size": 496983, + "gs_citation": 76, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17997000557724623012&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, Princeton University; Department of Computer Science, Princeton University; Department of Computer Science, Princeton University", + "aff_domain": "cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "email": "cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "github": "https://github.com/princeton-nlp/NLProofS", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Princeton University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.princeton.edu", + "aff_unique_abbr": "Princeton", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.337", + "title": "Generating Textual Adversaries with Minimal Perturbation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Many word-level adversarial attack approaches for textual data have been proposed in recent studies. However, due to the massive search space consisting of combinations of candidate words, the existing approaches face the problem of preserving the semantics of texts when crafting adversarial counterparts. In this paper, we develop a novel attack strategy to find adversarial texts with high similarity to the original texts while introducing minimal perturbation. The rationale is that we expect the adversarial texts with small perturbation can better preserve the semantic meaning of original texts. Experiments show that, compared with state-of-the-art attack approaches, our approach achieves higher success rates and lower perturbation rates in four benchmark datasets.", + "author": "Xingyi Zhao; Lu Zhang; Depeng Xu; Shuhan Yuan", + "authorids": "/x/xingyi-zhao/; /l/lu-zhang/; /d/depeng-xu/; /s/shuhan-yuan/", + "bibtex": "@inproceedings{zhao-etal-2022-generating,\n title = \"Generating Textual Adversaries with Minimal Perturbation\",\n author = \"Zhao, Xingyi and\n Zhang, Lu and\n Xu, Depeng and\n Yuan, Shuhan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.337/\",\n doi = \"10.18653/v1/2022.findings-emnlp.337\",\n pages = \"4599--4606\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.337.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.337/", + "pdf_size": 1289958, + "gs_citation": 3, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=14762807130845105765&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Utah State University; University of Arkansas; University of North Carolina at Charlotte; Utah State University", + "aff_domain": "usu.edu;uark.edu;uncc.edu;usu.edu", + "email": "usu.edu;uark.edu;uncc.edu;usu.edu", + "github": "https://github.com/xingyizhao/TAMPERS", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Utah State University;University of Arkansas;University of North Carolina at Charlotte", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usu.edu;https://www.uark.edu;https://www.uncc.edu", + "aff_unique_abbr": "USU;UARK;UNCC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Charlotte", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.451", + "title": "Generative Aspect-Based Sentiment Analysis with Contrastive Learning and Expressive Structure", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Generative models have demonstrated impressive results on Aspect-based Sentiment Analysis (ABSA) tasks, particularly for the emerging task of extracting Aspect-Category-Opinion-Sentiment (ACOS) quadruples. However, these models struggle with implicit sentiment expressions, which are commonly observed in opinionated content such as online reviews. In this work, we introduce GEN-SCL-NAT, which consists of two techniques for improved structured generation for ACOS quadruple extraction. First, we propose GEN-SCL, a supervised contrastive learning objective that aids quadruple prediction by encouraging the model to produce input representations that are discriminable across key input attributes, such as sentiment polarity and the existence of implicit opinions and aspects. 
Second, we introduce GEN-NAT, a new structured generation format that better adapts pre-trained autoregressive encoder-decoder models to extract quadruples in a generative fashion. Experimental results show that GEN-SCL-NAT achieves top performance across three ACOS datasets, averaging 1.48% F1 improvement, with a maximum 1.73% increase on the LAPTOP-L1 dataset. Additionally, we see significant gains on implicit aspect and opinion splits that have been shown as challenging for existing ACOS approaches.", + "author": "Joseph Peper; Lu Wang", + "authorids": "/j/joseph-j-peper/; /l/lu-wang/", + "bibtex": "@inproceedings{peper-wang-2022-generative,\n title = \"Generative Aspect-Based Sentiment Analysis with Contrastive Learning and Expressive Structure\",\n author = \"Peper, Joseph and\n Wang, Lu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.451/\",\n doi = \"10.18653/v1/2022.findings-emnlp.451\",\n pages = \"6089--6095\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.451.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.451/", + "pdf_size": 4616536, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1074532594710602659&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Computer Science and Engineering, University of Michigan; Computer Science and Engineering, University of Michigan", + "aff_domain": "umich.edu;umich.edu", + "email": "umich.edu;umich.edu", + "github": "https://github.com/jpeper/GEN_SCL_NAT", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "Computer Science and Engineering", + 
"aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Ann Arbor", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.470", + "title": "Generative Data Augmentation with Contrastive Learning for Zero-Shot Stance Detection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Stance detection aims to identify whether the author of an opinionated text is in favor of, against, or neutral towards a given target. Remarkable success has been achieved when sufficient labeled training data is available. However, it is labor-intensive to annotate sufficient data and train the model for every new target.Therefore, zero-shot stance detection, aiming at identifying stances of unseen targets with seen targets, has gradually attracted attention. Among them, one of the important challenges is to reduce the domain transfer between seen and unseen targets. To tackle this problem, we propose a generative data augmentation approach to generate training samples containing targets and stances for testing data, and map the real samples and generated synthetic samples into the same embedding space with contrastive learning, then perform the final classification based on the augmented data. We evaluate our proposed model on two benchmark datasets. 
Experimental results show that our approach achieves state-of-the-art performance on most topics in the task of zero-shot stance detection.", + "author": "Yang Li; Jiawei Yuan", + "authorids": "/y/yang-li/; /j/jiawei-yuan/", + "bibtex": "@inproceedings{li-yuan-2022-generative,\n title = \"Generative Data Augmentation with Contrastive Learning for Zero-Shot Stance Detection\",\n author = \"Li, Yang and\n Yuan, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.470/\",\n doi = \"10.18653/v1/2022.emnlp-main.470\",\n pages = \"6985--6995\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.470.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.470/", + "pdf_size": 5197578, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4715092655457501086&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Northeast Forestry University; Northeast Forestry University", + "aff_domain": "nefu.edu.cn;nefu.edu.cn", + "email": "nefu.edu.cn;nefu.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Northeast Forestry University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.nefu.edu.cn", + "aff_unique_abbr": "NEFU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.199", + "title": "Generative Entity Typing with Curriculum Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entity typing aims to assign types to the entity mentions in given texts. 
The traditional classification-based entity typing paradigm has two unignorable drawbacks: 1) it fails to assign an entity to the types beyond the predefined type set, and 2) it can hardly handle few-shot and zero-shot situations where many long-tail types only have few or even no training instances. To overcome these drawbacks, we propose a novel generative entity typing (GET) paradigm: given a text with an entity mention, the multiple types for the role that the entity plays in the text are generated with a pre-trained language model (PLM). However, PLMs tend to generate coarse-grained types after fine-tuning upon the entity typing dataset. In addition, only the heterogeneous training data consisting of a small portion of human-annotated data and a large portion of auto-generated but low-quality data are provided for model training. To tackle these problems, we employ curriculum learning (CL) to train our GET model on heterogeneous data, where the curriculum could be self-adjusted with the self-paced learning according to its comprehension of the type granularity and data heterogeneity. Our extensive experiments upon the datasets of different languages and downstream tasks justify the superiority of our GET model over the state-of-the-art entity typing models. 
The code has been released on https://github.com/siyuyuan/GET.", + "author": "Siyu Yuan; Deqing Yang; Jiaqing Liang; Zhixu Li; Jinxi Liu; Jingyue Huang; Yanghua Xiao", + "authorids": "/s/siyu-yuan/; /d/deqing-yang/; /j/jiaqing-liang/; /z/zhixu-li/; /j/jinxi-liu/; /j/jingyue-huang/; /y/yanghua-xiao/", + "bibtex": "@inproceedings{yuan-etal-2022-generative-entity,\n title = \"Generative Entity Typing with Curriculum Learning\",\n author = \"Yuan, Siyu and\n Yang, Deqing and\n Liang, Jiaqing and\n Li, Zhixu and\n Liu, Jinxi and\n Huang, Jingyue and\n Xiao, Yanghua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.199/\",\n doi = \"10.18653/v1/2022.emnlp-main.199\",\n pages = \"3061--3073\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.199.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.199/", + "pdf_size": 821166, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12015965774275951209&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/siyuyuan/GET", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.676", + "title": "Generative Entity-to-Entity Stance Detection with Knowledge Graph Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Stance detection is typically framed as predicting the sentiment in a given text towards a target entity. However, this setup overlooks the importance of the source entity, i.e., who is expressing the opinion. 
In this paper, we emphasize the imperative need for studying interactions among entities when inferring stances. We first introduce a new task, entity-to-entity (E2E) stance detection, which primes models to identify entities in their canonical names and discern stances jointly. To support this study, we curate a new dataset with 10,641 annotations labeled at the sentence level from news articles of different ideological leanings. We present a novel generative framework to allow the generation of canonical names for entities as well as stances among them. We further enhance the model with a graph encoder to summarize entity activities and external knowledge surrounding the entities. Experiments show that our model outperforms strong comparisons by large margins. Further analyses demonstrate the usefulness of E2E stance detection for understanding media quotation and stance landscape as well as inferring entity ideology.", + "author": "Xinliang Frederick Zhang; Nick Beauchamp; Lu Wang", + "authorids": "/x/xinliang-frederick-zhang/; /n/nick-beauchamp/; /l/lu-wang/", + "bibtex": "@inproceedings{zhang-etal-2022-generative,\n title = \"Generative Entity-to-Entity Stance Detection with Knowledge Graph Augmentation\",\n author = \"Zhang, Xinliang Frederick and\n Beauchamp, Nick and\n Wang, Lu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.676/\",\n doi = \"10.18653/v1/2022.emnlp-main.676\",\n pages = \"9950--9969\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.676.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.676/", + "pdf_size": 500451, + "gs_citation": 8, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=14419677955026536112&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff": "Computer Science and Engineering, University of Michigan, Ann Arbor, MI; Department of Political Science, Northeastern University, Boston, MA; Computer Science and Engineering, University of Michigan, Ann Arbor, MI", + "aff_domain": "umich.edu;northeastern.edu;umich.edu", + "email": "umich.edu;northeastern.edu;umich.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Michigan;Northeastern University", + "aff_unique_dep": "Computer Science and Engineering;Department of Political Science", + "aff_unique_url": "https://www.umich.edu;https://www.northeastern.edu", + "aff_unique_abbr": "UM;NEU", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Ann Arbor;Boston", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.1", + "title": "Generative Knowledge Graph Construction: A Review", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Generative Knowledge Graph Construction (KGC) refers to those methods that leverage the sequence-to-sequence framework for building knowledge graphs, which is flexible and can be adapted to widespread tasks. In this study, we summarize the recent compelling progress in generative knowledge graph construction. We present the advantages and weaknesses of each paradigm in terms of different generation targets and provide theoretical insight and empirical analysis. Based on the review, we suggest promising research directions for the future. 
Our contributions are threefold: (1) We present a detailed, complete taxonomy for the generative KGC methods; (2) We provide a theoretical and empirical analysis of the generative KGC methods; (3) We propose several research directions that can be developed in the future.", + "author": "Hongbin Ye; Ningyu Zhang; Hui Chen; Huajun Chen", + "authorids": "/h/hongbin-ye/; /n/ningyu-zhang/; /h/hui-chen/; /h/huajun-chen/", + "bibtex": "@inproceedings{ye-etal-2022-generative,\n title = \"Generative Knowledge Graph Construction: A Review\",\n author = \"Ye, Hongbin and\n Zhang, Ningyu and\n Chen, Hui and\n Chen, Huajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.1/\",\n doi = \"10.18653/v1/2022.emnlp-main.1\",\n pages = \"1--17\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.1.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.1/", + "pdf_size": 565529, + "gs_citation": 90, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7906522359090326720&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Alibaba Group; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;alibaba-inc.com;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;alibaba-inc.com;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1;0+0", + "aff_unique_norm": "Zhejiang University;Alibaba Group", 
+ "aff_unique_dep": "Joint Lab for Knowledge Engine;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ZJU;Alibaba", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.42", + "title": "Generative Language Models for Paragraph-Level Question Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Powerful generative models have led to recent progress in question generation (QG). However, it is difficult to measure advances in QG research since there are no standardized resources that allow a uniform comparison among approaches. In this paper, we introduce QG-Bench, a multilingual and multidomain benchmark for QG that unifies existing question answering datasets by converting them to a standard QG setting. It includes general-purpose datasets such as SQuAD for English, datasets from ten domains and two styles, as well as datasets in eight different languages. Using QG-Bench as a reference, we perform an extensive analysis of the capabilities of language models for the task. First, we propose robust QG baselines based on fine-tuning generative language models. Then, we complement automatic evaluation based on standard metrics with an extensive manual evaluation, which in turn sheds light on the difficulty of evaluating QG models. 
Finally, we analyse both the domain adaptability of these models as well as the effectiveness of multilingual models in languages other than English.QG-Bench is released along with the fine-tuned models presented in the paper (https://github.com/asahi417/lm-question-generation), which are also available as a demo (https://autoqg.net/).", + "author": "Asahi Ushio; Fernando Alva-Manchego; Jose Camacho-Collados", + "authorids": "/a/asahi-ushio/; /f/fernando-alva-manchego/; /j/jose-camacho-collados/", + "bibtex": "@inproceedings{ushio-etal-2022-generative,\n title = \"Generative Language Models for Paragraph-Level Question Generation\",\n author = \"Ushio, Asahi and\n Alva-Manchego, Fernando and\n Camacho-Collados, Jose\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.42/\",\n doi = \"10.18653/v1/2022.emnlp-main.42\",\n pages = \"670--688\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.42.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.42/", + "pdf_size": 1147617, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13484093277265430571&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Cardiff NLP, School of Computer Science and Informatics, Cardiff University, UK; Cardiff NLP, School of Computer Science and Informatics, Cardiff University, UK; Cardiff NLP, School of Computer Science and Informatics, Cardiff University, UK", + "aff_domain": "cardiff.ac.uk;cardiff.ac.uk;cardiff.ac.uk", + "email": "cardiff.ac.uk;cardiff.ac.uk;cardiff.ac.uk", + "github": "https://github.com/asahi417/lm-question-generation", + "project": "https://autoqg.net/", + "author_num": 3, + "aff_unique_index": 
"0;0;0", + "aff_unique_norm": "Cardiff University", + "aff_unique_dep": "School of Computer Science and Informatics", + "aff_unique_url": "https://www.cardiff.ac.uk", + "aff_unique_abbr": "Cardiff", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Cardiff", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.92", + "title": "Generative Multi-hop Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A common practice for text retrieval is to use an encoder to map the documents and the query to a common vector space and perform a nearest neighbor search (NNS); multi-hop retrieval also often adopts the same paradigm, usually with a modification of iteratively reformulating the query vector so that it can retrieve different documents at each hop. However, such a bi-encoder approach has limitations in multi-hop settings; (1) the reformulated query gets longer as the number of hops increases, which further tightens the embedding bottleneck of the query vector, and (2) it is prone to error propagation. In this paper, we focus on alleviating these limitations in multi-hop settings by formulating the problem in a fully generative way. We propose an encoder-decoder model that performs multi-hop retrieval by simply generating the entire text sequences of the retrieval targets, which means the query and the documents interact in the language model\u2019s parametric space rather than L2 or inner product space as in the bi-encoder approach. 
Our approach, Generative Multi-hop Retrieval (GMR), consistently achieves comparable or higher performance than bi-encoder models in five datasets while demonstrating superior GPU memory and storage footprint.", + "author": "Hyunji Lee; Sohee Yang; Hanseok Oh; Minjoon Seo", + "authorids": "/h/hyunji-lee/; /s/sohee-yang/; /h/hanseok-oh/; /m/minjoon-seo/", + "bibtex": "@inproceedings{lee-etal-2022-generative,\n title = \"Generative Multi-hop Retrieval\",\n author = \"Lee, Hyunji and\n Yang, Sohee and\n Oh, Hanseok and\n Seo, Minjoon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.92/\",\n doi = \"10.18653/v1/2022.emnlp-main.92\",\n pages = \"1417--1436\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.92.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.92/", + "pdf_size": 741507, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9058641129509884560&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff": "KAIST AI; KAIST AI; KAIST AI; KAIST AI", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "https://github.com/amy-hyunji/Generative-Multihop-Retrieval", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology", + "aff_unique_dep": "KAIST AI", + "aff_unique_url": "https://www.kaist.edu", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.231", + "title": 
"Generative Prompt Tuning for Relation Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Using prompts to explore the knowledge contained within pre-trained language models for downstream tasks has now become an active topic. Current prompt tuning methods mostly convert the downstream tasks to masked language modeling problems by adding cloze-style phrases and mapping all labels to verbalizations with fixed length, which has proven effective for tasks with simple label spaces. However, when applied to relation classification exhibiting complex label spaces, vanilla prompt tuning methods may struggle with label verbalizations with arbitrary lengths due to rigid prompt restrictions. Inspired by the text infilling task for pre-training generative models that can flexibly predict missing spans, we propose a novel generative prompt tuning method to reformulate relation classification as an infilling problem, which frees our approach from limitations of current prompt based approaches and thus fully exploits rich semantics of entity and relation types. In addition, we design entity-guided decoding and discriminative relation scoring to generate and align relations effectively and efficiently during inference. 
Extensive experiments under fully supervised settings and low-resource settings demonstrate the effectiveness of our approach.", + "author": "Jiale Han; Shuai Zhao; Bo Cheng; Shengkun Ma; Wei Lu", + "authorids": "/j/jiale-han/; /s/shuai-zhao/; /b/bo-cheng/; /s/shengkun-ma/; /w/wei-lu/", + "bibtex": "@inproceedings{han-etal-2022-generative,\n title = \"Generative Prompt Tuning for Relation Classification\",\n author = \"Han, Jiale and\n Zhao, Shuai and\n Cheng, Bo and\n Ma, Shengkun and\n Lu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.231/\",\n doi = \"10.18653/v1/2022.findings-emnlp.231\",\n pages = \"3170--3185\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.231.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.231/", + "pdf_size": 1087061, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9689305108399495066&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications; StatNLP Research Group, Singapore University of Technology and Design", + "aff_domain": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;sutd.edu.sg", + "email": "bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;sutd.edu.sg", + "github": "", + "project": "", + "author_num": 
5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Singapore University of Technology and Design", + "aff_unique_dep": "State Key Laboratory of Networking and Switching Technology;StatNLP Research Group", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.sutd.edu.sg", + "aff_unique_abbr": "BUPT;SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.132", + "title": "GeoMLAMA: Geo-Diverse Commonsense Probing on Multilingual Pre-Trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has shown that Pre-trained Language Models (PLMs) store the relational knowledge learned from data and utilize it for performing downstream tasks. However, commonsense knowledge across different regions may vary. For instance, the color of bridal dress is white in American weddings whereas it is red in Chinese weddings. In this paper, we introduce a benchmark dataset, Geo-diverse Commonsense Multilingual Language Models Analysis (GeoMLAMA), for probing the diversity of the relational knowledge in multilingual PLMs. GeoMLAMA contains 3125 prompts in English, Chinese, Hindi, Persian, and Swahili, with a wide coverage of concepts shared by people from American, Chinese, Indian, Iranian and Kenyan cultures. We benchmark 11 standard multilingual PLMs on GeoMLAMA. 
Interestingly, we find that 1) larger multilingual PLMs variants do not necessarily store geo-diverse concepts better than its smaller variant; 2) multilingual PLMs are not intrinsically biased towards knowledge from the Western countries (the United States); 3) the native language of a country may not be the best language to probe its knowledge and 4) a language may better probe knowledge about a non-native country than its native country.", + "author": "Da Yin; Hritik Bansal; Masoud Monajatipoor; Liunian Harold Li; Kai-Wei Chang", + "authorids": "/d/da-yin/; /h/hritik-bansal/; /m/masoud-monajatipoor/; /l/liunian-harold-li/; /k/kai-wei-chang/", + "bibtex": "@inproceedings{yin-etal-2022-geomlama,\n title = \"{G}eo{MLAMA}: Geo-Diverse Commonsense Probing on Multilingual Pre-Trained Language Models\",\n author = \"Yin, Da and\n Bansal, Hritik and\n Monajatipoor, Masoud and\n Li, Liunian Harold and\n Chang, Kai-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.132/\",\n doi = \"10.18653/v1/2022.emnlp-main.132\",\n pages = \"2039--2055\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.132.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.132/", + "pdf_size": 762767, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9930709622764411821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles; Computer Science Department, 
University of California, Los Angeles", + "aff_domain": "cs.ucla.edu;cs.ucla.edu;ucla.edu;cs.ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu;cs.ucla.edu;ucla.edu;cs.ucla.edu;cs.ucla.edu", + "github": "https://github.com/WadeYin9712/GeoMLAMA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of California, Los Angeles", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.ucla.edu", + "aff_unique_abbr": "UCLA", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.89", + "title": "Geographic Citation Gaps in NLP Research", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In a fair world, people have equitable opportunities to education, to conduct scientific research, to publish, and to get credit for their work, regardless of where they live. However, it is common knowledge among researchers that a vast number of papers accepted at top NLP venues come from a handful of western countries and (lately) China; whereas, very few papers from Africa and South America get published. Similar disparities are also believed to exist for paper citation counts. In the spirit of \u201cwhat we do not measure, we cannot improve\u201d, this work asks a series of questions on the relationship between geographical location and publication success (acceptance in top NLP venues and citation impact). We first created a dataset of 70,000 papers from the ACL Anthology, extracted their meta-information, andgenerated their citation network. We then show that not only are there substantial geographical disparities in paper acceptance and citation but also that these disparities persist even when controlling for a number of variables such as venue of publication and sub-field of NLP. 
Further, despite some steps taken by the NLP community to improve geographical diversity, we show that the disparity in publication metrics across locations is still on an increasing trend since the early 2000s. We release our code and dataset here: https://github.com/iamjanvijay/acl-cite-net", + "author": "Mukund Rungta; Janvijay Singh; Saif M. Mohammad; Diyi Yang", + "authorids": "/m/mukund-rungta/; /j/janvijay-singh/; /s/saif-mohammad/; /d/diyi-yang/", + "bibtex": "@inproceedings{rungta-etal-2022-geographic,\n title = \"Geographic Citation Gaps in {NLP} Research\",\n author = \"Rungta, Mukund and\n Singh, Janvijay and\n Mohammad, Saif M. and\n Yang, Diyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.89/\",\n doi = \"10.18653/v1/2022.emnlp-main.89\",\n pages = \"1371--1383\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.89.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.89/", + "pdf_size": 4095656, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17816510868501959699&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "School of Interactive Computing, Georgia Institute of Technology + Stanford University; School of Interactive Computing, Georgia Institute of Technology + Stanford University; National Research Council Canada; Stanford University", + "aff_domain": "gatech.edu;gatech.edu;nrc-cnrc.gc.ca;cs.stanford.edu", + "email": "gatech.edu;gatech.edu;nrc-cnrc.gc.ca;cs.stanford.edu", + "github": "https://github.com/iamjanvijay/acl-cite-net", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;1", + "aff_unique_norm": "Georgia Institute of 
Technology;Stanford University;National Research Council Canada", + "aff_unique_dep": "School of Interactive Computing;;", + "aff_unique_url": "https://www.gatech.edu;https://www.stanford.edu;https://www.nrc-cnrc.gc.ca", + "aff_unique_abbr": "Georgia Tech;Stanford;NRC-CNRC", + "aff_campus_unique_index": "0+1;0+1;1", + "aff_campus_unique": "Atlanta;Stanford;", + "aff_country_unique_index": "0+0;0+0;1;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.findings-emnlp.236", + "title": "Getting the Most out of Simile Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Simile recognition involves two subtasks: simile sentence classification that discriminates whether a sentence contains simile, and simile component extraction that locates the corresponding objects (i.e., tenors and vehicles).Recent work ignores features other than surface strings and suffers from the data hunger issue.We explore expressive features for this task to help achieve more effective data utilization.In particular, we study two types of features: 1) input-side features that include POS tags, dependency trees and word definitions, and 2) decoding features that capture the interdependence among various decoding decisions.We further construct a model named HGSR, which merges the input-side features as a heterogeneous graph and leverages decoding features via distillation.Experiments show that HGSR significantly outperforms the current state-of-the-art systems and carefully designed baselines, verifying the effectiveness of introduced features. 
We will release our code upon paper acceptance.", + "author": "Xiaoyue Wang; Linfeng Song; Xin Liu; Chulun Zhou; Hualin Zeng; Jinsong Su", + "authorids": "/x/xiaoyue-wang/; /l/linfeng-song/; /x/xin-liu/; /c/chulun-zhou/; /h/hualin-zeng/; /j/jinsong-su/", + "bibtex": "@inproceedings{wang-etal-2022-getting,\n title = \"Getting the Most out of Simile Recognition\",\n author = \"Wang, Xiaoyue and\n Song, Linfeng and\n Liu, Xin and\n Zhou, Chulun and\n Zeng, Hualin and\n Su, Jinsong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.236/\",\n doi = \"10.18653/v1/2022.findings-emnlp.236\",\n pages = \"3243--3252\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.236.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.236/", + "pdf_size": 1004597, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=500650963388184660&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/DeepLearnXMU/HGSR", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.327", + "title": "Goal-oriented Vision-and-Dialog Navigation via Reinforcement Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Vision-and-dialog navigation is a recent benchmark for evaluating the AI capabilities of perception, interaction, and decision making. While existing methods developed for this benchmark have demonstrated great successes, they mostly rely on large datasets, where data collection can be a challenge, and the learned policies are not adaptive to domain changes. 
In this paper, we focus on a new problem, referred to as goal-oriented vision-and-dialog navigation (GVDN), where an agent uses reinforcement learning techniques to compute dialog-navigation policies from trial and error. A robot conducts visual navigation to locate target objects, and can talk to a remote human operator as needed. Our remote human is able to provide guidance on navigation only if the robot correctly conveys its location through dialog. Experiments have been conducted using photo-realistic simulation environments. Results suggest that, our agent outperforms competitive baselines in success rate.", + "author": "Yan Cao; Keting Lu; David DeFazio; Shiqi Zhang", + "authorids": "/y/yan-cao/; /k/keting-lu/; /d/david-defazio/; /s/shiqi-zhang/", + "bibtex": "@inproceedings{cao-etal-2022-goal,\n title = \"Goal-oriented Vision-and-Dialog Navigation via Reinforcement Learning\",\n author = \"Cao, Yan and\n Lu, Keting and\n DeFazio, David and\n Zhang, Shiqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.327/\",\n doi = \"10.18653/v1/2022.findings-emnlp.327\",\n pages = \"4473--4482\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.327.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.327/", + "pdf_size": 1973546, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14244656265874850576&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "University of Science and Technology of China; Baidu Inc; Binghamton University; Binghamton University", + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;binghamton.edu;binghamton.edu", + "email": 
"mail.ustc.edu.cn;mail.ustc.edu.cn;binghamton.edu;binghamton.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2", + "aff_unique_norm": "University of Science and Technology of China;Baidu;Binghamton University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.baidu.com;https://www.binghamton.edu", + "aff_unique_abbr": "USTC;Baidu;Binghamton", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.144", + "title": "Gradient-based Constrained Sampling from Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large pretrained language models are successful at generating fluent text but are notoriously hard to controllably sample from. In this work, we study constrained sampling from such language models, i.e., generating text that satisfies user-defined constraints, while maintaining fluency and model\u2019s performance in a downstream task. We propose MuCoLa\u2014a sampling procedure that combines the log-likelihood of the language model with arbitrary (differentiable) constraints in a single energy function, and then generates samples in a non-autoregressive manner. Specifically, it initializes the entire output sequence with noise and follows a Markov chain defined by Langevin Dynamics using the gradients of this energy. 
We evaluate MuCoLa on text generation with soft and hard constraints as well as their combinations, obtaining significant improvements over competitive baselines for toxicity avoidance, sentiment control, and keyword-guided generation.", + "author": "Sachin Kumar; Biswajit Paria; Yulia Tsvetkov", + "authorids": "/s/sachin-kumar/; /b/biswajit-paria/; /y/yulia-tsvetkov/", + "bibtex": "@inproceedings{kumar-etal-2022-gradient,\n title = \"Gradient-based Constrained Sampling from Language Models\",\n author = \"Kumar, Sachin and\n Paria, Biswajit and\n Tsvetkov, Yulia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.144/\",\n doi = \"10.18653/v1/2022.emnlp-main.144\",\n pages = \"2251--2277\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.144.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.144/", + "pdf_size": 650367, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6194462761914722861&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University, Pittsburgh PA; Machine Learning Department, Carnegie Mellon University, Pittsburgh PA; Paul G. Allen School of Computer Science & Engineering, University of Washington, Seattle WA", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.washington.edu", + "email": "cs.cmu.edu;cs.cmu.edu;cs.washington.edu", + "github": "https://github.com/Sachin19/mucoco/tree/sampling", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Carnegie Mellon University;University of Washington", + "aff_unique_dep": "Language Technologies Institute;Paul G. 
Allen School of Computer Science & Engineering", + "aff_unique_url": "https://www.cmu.edu;https://www.washington.edu", + "aff_unique_abbr": "CMU;UW", + "aff_campus_unique_index": "0;0;1", + "aff_campus_unique": "Pittsburgh;Seattle", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.25", + "title": "Grafting Pre-trained Models for Multimodal Headline Generation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Multimodal headline utilizes both video frames and transcripts to generate the natural language title of the videos. Due to a lack of large-scale, manually annotated data, the task of annotating grounded headlines for video is labor intensive and impractical. Previous researches on pre-trained language models and video-language models have achieved significant progress in related downstream tasks. However, none of them can be directly applied to multimodal headline architecture where we need both multimodal encoder and sentence decoder. A major challenge in simply gluing language model and video-language model is the modality balance, which is aimed at combining visual-language complementary abilities. In this paper, we propose a novel approach to graft the video encoder from the pre-trained video-language model on the generative pre-trained language model. We also present a consensus fusion mechanism for the integration of different components, via inter/intra modality relation. 
Empirically, experiments show that the grafted model achieves strong results on a brand-new dataset collected from real-world applications.", + "author": "Lingfeng Qiao; Chen Wu; Ye Liu; Haoyuan Peng; Di Yin; Bo Ren", + "authorids": "/l/lingfeng-qiao/; /c/chen-wu/; /y/ye-liu/; /h/haoyuan-peng/; /d/di-yin/; /b/bo-ren/", + "bibtex": "@inproceedings{qiao-etal-2022-grafting,\n title = \"Grafting Pre-trained Models for Multimodal Headline Generation\",\n author = \"Qiao, Lingfeng and\n Wu, Chen and\n Liu, Ye and\n Peng, Haoyuan and\n Yin, Di and\n Ren, Bo\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.25/\",\n doi = \"10.18653/v1/2022.emnlp-industry.25\",\n pages = \"244--253\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.25.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.25/", + "pdf_size": 6241229, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10811025510322176478&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Shanghai, China; Tencent Youtu Lab, Hefei, China", + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;icloud.com", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;icloud.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Tencent", + "aff_unique_dep": "Youtu Lab", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "0;0;0;0;0;1", + 
"aff_campus_unique": "Shanghai;Hefei", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.13", + "title": "Grape: Knowledge Graph Enhanced Passage Reader for Open-domain Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A common thread of open-domain question answering (QA) models employs a retriever-reader pipeline that first retrieves a handful of relevant passages from Wikipedia and then peruses the passages to produce an answer. However, even state-of-the-art readers fail to capture the complex relationships between entities appearing in questions and retrieved passages, leading to answers that contradict the facts. In light of this, we propose a novel knowledge graph enhanced passage reader, namely Grape, to improve the reader performance for open-domain QA. Specifically, for each pair of question and retrieved passage, we first construct a localized bipartite graph, attributed to entity embeddings extracted from the intermediate layer of the reader model. Then, a graph neural network learns relational knowledge while fusing graph and contextual representations into the hidden states of the reader model. Experiments on three open-domain QA benchmarks show Grape can improve the state-of-the-art performance by up to 2.2 exact match score with a negligible overhead increase, with the same retriever and retrieved passages. 
Our code is publicly available at https://github.com/jumxglhf/GRAPE.", + "author": "Mingxuan Ju; Wenhao Yu; Tong Zhao; Chuxu Zhang; Yanfang Ye", + "authorids": "/m/mingxuan-ju/; /w/wenhao-yu/; /t/tong-zhao/; /c/chuxu-zhang/; /y/yanfang-ye/", + "bibtex": "@inproceedings{ju-etal-2022-grape,\n title = \"Grape: Knowledge Graph Enhanced Passage Reader for Open-domain Question Answering\",\n author = \"Ju, Mingxuan and\n Yu, Wenhao and\n Zhao, Tong and\n Zhang, Chuxu and\n Ye, Yanfang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.13/\",\n doi = \"10.18653/v1/2022.findings-emnlp.13\",\n pages = \"169--181\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.13.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.13/", + "pdf_size": 1406311, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17755994192455944465&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 7, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.306", + "title": "Graph Embeddings for Argumentation Quality Assessment", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Argumentation is used by people both internally, by evaluating arguments and counterarguments to make sense of a situation and take a decision, and externally, e.g., in a debate, by exchanging arguments to reach an agreement or to promote an individual position. 
In this context, the assessment of the quality of the arguments is of extreme importance, as it strongly influences the evaluation of the overall argumentation, impacting on the decision making process. The automatic assessment of the quality of natural language arguments is recently attracting interest in the Argument Mining field. However, the issue of automatically assessing the quality of an argumentation largely remains a challenging unsolved task. Our contribution is twofold: first, we present a novel resource of 402 student persuasive essays, where three main quality dimensions (i.e., cogency, rhetoric, and reasonableness) have been annotated, leading to 1908 arguments tagged with quality facets; second, we address this novel task of argumentation quality assessment proposing a novel neural architecture based on graph embeddings, that combines both the textual features of the natural language arguments and the overall argument graph, i.e., considering also the support and attack relations holding among the arguments. 
Results on the persuasive essays dataset outperform state-of-the-art and standard baselines\u2019 performance.", + "author": "Santiago Marro; Elena Cabrio; Serena Villata", + "authorids": "/s/santiago-marro/; /e/elena-cabrio/; /s/serena-villata/", + "bibtex": "@inproceedings{marro-etal-2022-graph,\n title = \"Graph Embeddings for Argumentation Quality Assessment\",\n author = \"Marro, Santiago and\n Cabrio, Elena and\n Villata, Serena\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.306/\",\n doi = \"10.18653/v1/2022.findings-emnlp.306\",\n pages = \"4154--4164\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.306.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.306/", + "pdf_size": 1053467, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14902154396047262714&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France; Universit\u00e9 C\u00f4te d\u2019Azur, CNRS, Inria, I3S, France", + "aff_domain": "univ-cotedazur.fr;univ-cotedazur.fr;univ-cotedazur.fr", + "email": "univ-cotedazur.fr;univ-cotedazur.fr;univ-cotedazur.fr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Universit\u00e9 C\u00f4te d\u2019Azur", + "aff_unique_dep": "", + "aff_unique_url": "https://www.univ-cotedazur.fr", + "aff_unique_abbr": "UCA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.emnlp-main.507", + "title": "Graph Hawkes 
Transformer for Extrapolated Reasoning on Temporal Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Temporal Knowledge Graph (TKG) reasoning has attracted increasing attention due to its enormous potential value, and the critical issue is how to model the complex temporal structure information effectively. Recent studies use the method of encoding graph snapshots into hidden vector space and then performing heuristic deductions, which perform well on the task of entity prediction. However, these approaches cannot predict when an event will occur and have the following limitations: 1) there are many facts not related to the query that can confuse the model; 2) there exists information forgetting caused by long-term evolutionary processes. To this end, we propose a Graph Hawkes Transformer (GHT) for both TKG entity prediction and time prediction tasks in the future time. In GHT, there are two variants of Transformer, which capture the instantaneous structural information and temporal evolution information, respectively, and a new relational continuous-time encoding function to facilitate feature evolution with the Hawkes process. 
Extensive experiments on four public datasets demonstrate its superior performance, especially on long-term evolutionary tasks.", + "author": "Haohai Sun; Shangyi Geng; Jialun Zhong; Han Hu; Kun He", + "authorids": "/h/haohai-sun/; /s/shangyi-geng/; /j/jialun-zhong/; /h/han-hu/; /k/kun-he/", + "bibtex": "@inproceedings{sun-etal-2022-graph,\n title = \"Graph {H}awkes Transformer for Extrapolated Reasoning on Temporal Knowledge Graphs\",\n author = \"Sun, Haohai and\n Geng, Shangyi and\n Zhong, Jialun and\n Hu, Han and\n He, Kun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.507/\",\n doi = \"10.18653/v1/2022.emnlp-main.507\",\n pages = \"7481--7493\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.507.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.507/", + "pdf_size": 596734, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17740991599933218912&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science and Technology, Huazhong University of Science and Technology; School of Computer Science and Technology, Huazhong University of Science and Technology; School of Computer Science and Technology, Huazhong University of Science and Technology; Microsoft Research Asia; School of Computer Science and Technology, Huazhong University of Science and Technology", + "aff_domain": "hust.edu.cn;hust.edu.cn;hust.edu.cn;microsoft.com;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;hust.edu.cn;microsoft.com;hust.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Huazhong University of Science and 
Technology;Microsoft Research", + "aff_unique_dep": "School of Computer Science and Technology;Research", + "aff_unique_url": "http://www.hust.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "HUST;MSR Asia", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.102", + "title": "Graph-Based Multilingual Label Propagation for Low-Resource Part-of-Speech Tagging", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Part-of-Speech (POS) tagging is an important component of the NLP pipeline, but many low-resource languages lack labeled data for training. An established method for training a POS tagger in such a scenario is to create a labeled training set by transferring from high-resource languages. In this paper, we propose a novel method for transferring labels from multiple high-resource source to low-resource target languages. We formalize POS tag projection as graph-based label propagation. Given translations of a sentence in multiple languages, we create a graph with words as nodes and alignment links as edges by aligning words for all language pairs. We then propagate node labels from source to target using a Graph Neural Network augmented with transformer layers. We show that our propagation creates training sets that allow us to train POS taggers for a diverse set of languages. 
When combined with enhanced contextualized embeddings, our method achieves a new state-of-the-art for unsupervised POS tagging of low-resource languages.", + "author": "Ayyoob Imani; Silvia Severini; Masoud Jalili Sabet; Fran\u00e7ois Yvon; Hinrich Sch\u00fctze", + "authorids": "/a/ayyoob-imani/; /s/silvia-severini/; /m/masoud-jalili-sabet/; /f/francois-yvon/; /h/hinrich-schutze/", + "bibtex": "@inproceedings{imanigooghari-etal-2022-graph,\n title = \"Graph-Based Multilingual Label Propagation for Low-Resource Part-of-Speech Tagging\",\n author = {Imani, Ayyoob and\n Severini, Silvia and\n Jalili Sabet, Masoud and\n Yvon, Fran{\\c{c}}ois and\n Sch{\\\"u}tze, Hinrich},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.102/\",\n doi = \"10.18653/v1/2022.emnlp-main.102\",\n pages = \"1577--1589\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.102.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.102/", + "pdf_size": 427625, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1484738590685415871&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 17, + "aff": "Center for Information and Language Processing (CIS), LMU Munich, Germany; Center for Information and Language Processing (CIS), LMU Munich, Germany; Center for Information and Language Processing (CIS), LMU Munich, Germany; Universit\u00e9 Paris-Saclay, CNRS, LISN, France; Center for Information and Language Processing (CIS), LMU Munich, Germany", + "aff_domain": "cis.lmu.de;cis.lmu.de;cis.lmu.de;limsi.fr; ", + "email": "cis.lmu.de;cis.lmu.de;cis.lmu.de;limsi.fr; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": 
"0;0;0;1;0", + "aff_unique_norm": "LMU Munich;Universit\u00e9 Paris-Saclay", + "aff_unique_dep": "Center for Information and Language Processing (CIS);CNRS, LISN", + "aff_unique_url": "https://www.lmu.de;https://www.universite-paris-saclay.fr", + "aff_unique_abbr": "LMU;UPS", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Munich;", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "Germany;France" + }, + { + "id": "2022.emnlp-main.702", + "title": "Graph-Induced Transformers for Efficient Multi-Hop Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A graph is a suitable data structure to represent the structural information of text. Recently, multi-hop question answering (MHQA) tasks, which require inter-paragraph/sentence linkages, have come to exploit such properties of a graph. Previous approaches to MHQA relied on leveraging the graph information along with the pre-trained language model (PLM) encoders. However, this trend exhibits the following drawbacks: (i) sample inefficiency while training in a low-resource setting; (ii) lack of reusability due to changes in the model structure or input. Our work proposes the Graph-Induced Transformer (GIT) that applies graph-derived attention patterns directly into a PLM, without the need to employ external graph modules. GIT can leverage the useful inductive bias of graphs while retaining the unperturbed Transformer structure and parameters. 
Our experiments on HotpotQA successfully demonstrate both the sample efficient characteristic of GIT and its capacity to replace the graph modules while preserving model performance.", + "author": "Giwon Hong; Jeonghwan Kim; Junmo Kang; Sung-Hyon Myaeng", + "authorids": "/g/giwon-hong/; /j/jeonghwan-kim/; /j/junmo-kang/; /s/sung-hyon-myaeng/", + "bibtex": "@inproceedings{hong-etal-2022-graph,\n title = \"Graph-Induced Transformers for Efficient Multi-Hop Question Answering\",\n author = \"Hong, Giwon and\n Kim, Jeonghwan and\n Kang, Junmo and\n Myaeng, Sung-Hyon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.702/\",\n doi = \"10.18653/v1/2022.emnlp-main.702\",\n pages = \"10288--10294\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.702.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.702/", + "pdf_size": 455598, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4096433852055465454&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "School of Computing, KAIST; School of Computing, KAIST; School of Interactive Computing, Georgia Institute of Technology + School of Computing, KAIST; School of Computing, KAIST", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;gatech.edu;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;gatech.edu;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+0;0", + "aff_unique_norm": "KAIST;Georgia Institute of Technology", + "aff_unique_dep": "School of Computing;School of Interactive Computing", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.gatech.edu", + "aff_unique_abbr": "KAIST;Georgia Tech", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Atlanta", + "aff_country_unique_index": "0;0;1+0;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "2022.emnlp-main.5", + "title": "Graph-based Model Generation for Few-Shot Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few-shot relation extraction (FSRE) has been a challenging problem since it only has a handful of training instances. Existing models follow a \u2018one-for-all\u2019 scheme where one general large model performs all individual N-way-K-shot tasks in FSRE, which prevents the model from achieving the optimal point on each task. In view of this, we propose a model generation framework that consists of one general model for all tasks and many tiny task-specific models for each individual task. The general model generates and passes the universal knowledge to the tiny models which will be further fine-tuned when performing specific tasks. In this way, we decouple the complexity of the entire task space from that of all individual tasks while absorbing the universal knowledge.Extensive experimental results on two public datasets demonstrate that our framework reaches a new state-of-the-art performance for FRSE tasks. 
Our code is available at: https://github.com/NLPWM-WHU/GM_GEN.", + "author": "Wanli Li; Tieyun Qian", + "authorids": "/w/wanli-li/; /t/tieyun-qian/", + "bibtex": "@inproceedings{li-qian-2022-graph,\n title = \"Graph-based Model Generation for Few-Shot Relation Extraction\",\n author = \"Li, Wanli and\n Qian, Tieyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.5/\",\n doi = \"10.18653/v1/2022.emnlp-main.5\",\n pages = \"62--71\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.5.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.5/", + "pdf_size": 460335, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3544544645466274377&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, Wuhan University, China; School of Computer Science, Wuhan University, China", + "aff_domain": "whu.edu.cn;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn", + "github": "https://github.com/NLPWM-WHU/GM_GEN", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Wuhan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.whu.edu.cn", + "aff_unique_abbr": "WHU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Wuhan", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.394", + "title": "GraphQ IR: Unifying the Semantic Parsing of Graph Query Languages with One Intermediate Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Subject to the huge semantic gap between natural and formal languages, neural semantic 
parsing is typically bottlenecked by its complexity of dealing with both input semantics and output syntax. Recent works have proposed several forms of supplementary supervision but none is generalized across multiple formal languages. This paper proposes a unified intermediate representation for graph query languages, named GraphQ IR. It has a natural-language-like expression that bridges the semantic gap and formally defined syntax that maintains the graph structure. Therefore, a neural semantic parser can more precisely convert user queries into GraphQ IR, which can be later losslessly compiled into various downstream graph query languages. Extensive experiments on several benchmarks including KQA Pro, Overnight, GrailQA, and MetaQA-Cypher under the standard i.i.d., out-of-distribution, and low-resource settings validate GraphQ IR\u2019s superiority over the previous state-of-the-arts with a maximum 11% accuracy improvement.", + "author": "Lunyiu Nie; Shulin Cao; Jiaxin Shi; Jiuding Sun; Qi Tian; Lei Hou; Juanzi Li; Jidong Zhai", + "authorids": "/l/lunyiu-nie/; /s/shulin-cao/; /j/jiaxin-shi/; /j/jiuding-sun/; /q/qi-tian/; /l/lei-hou/; /j/juanzi-li/; /j/jidong-zhai/", + "bibtex": "@inproceedings{nie-etal-2022-graphq,\n title = \"{G}raph{Q} {IR}: Unifying the Semantic Parsing of Graph Query Languages with One Intermediate Representation\",\n author = \"Nie, Lunyiu and\n Cao, Shulin and\n Shi, Jiaxin and\n Sun, Jiuding and\n Tian, Qi and\n Hou, Lei and\n Li, Juanzi and\n Zhai, Jidong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.394/\",\n doi = \"10.18653/v1/2022.emnlp-main.394\",\n pages = \"5848--5865\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.394.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.394/", + "pdf_size": 1114392, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1456016523924494143&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Huawei Cloud Computing Technologies Co., Ltd.; Department of Computer Science and Technology, Tsinghua University; Huawei Cloud Computing Technologies Co., Ltd.; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;huawei.com;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;gmail.com;mails.tsinghua.edu.cn;huawei.com;tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;0;1;0;0;0", + "aff_unique_norm": "Tsinghua University;Huawei Cloud Computing Technologies Co., Ltd.", + "aff_unique_dep": "Department of Computer Science and Technology;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.huawei.com/en/cloud", + "aff_unique_abbr": "THU;Huawei Cloud", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.155", + "title": "Ground-Truth Labels Matter: A Deeper Look into Input-Label Demonstrations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite recent explosion of interests in in-context learning, the underlying mechanism and the precise impact of the quality of demonstrations remain 
elusive.Intuitively, ground-truth labels should have as much impact in in-context learning (ICL) as supervised learning, but recent work reported that the input-label correspondence is significantly less important than previously thought.Intrigued by this counter-intuitive observation, we re-examine the importance of ground-truth labels in in-context learning.With the introduction of two novel metrics, namely Label-Correctness Sensitivity and Ground-truth Label Effect Ratio (GLER), we were able to conduct quantifiable analysis on the impact of ground-truth label demonstrations.Through extensive analyses, we find that the correct input-label mappings can have varying impacts on the downstream in-context learning performances, depending on the experimental configuration.Through additional studies, we identify key components, such as the verbosity of prompt templates and the language model size, as the controlling factor to achieve more noise-resilient ICL.", + "author": "Kang Min Yoo; Junyeob Kim; Hyuhng Joon Kim; Hyunsoo Cho; Hwiyeol Jo; Sang-Woo Lee; Sang-goo Lee; Taeuk Kim", + "authorids": "/k/kang-min-yoo/; /j/junyeob-kim/; /h/hyuhng-joon-kim/; /h/hyunsoo-cho/; /h/hwiyeol-jo/; /s/sang-woo-lee/; /s/sang-goo-lee/; /t/taeuk-kim/", + "bibtex": "@inproceedings{yoo-etal-2022-ground,\n title = \"Ground-Truth Labels Matter: A Deeper Look into Input-Label Demonstrations\",\n author = \"Yoo, Kang Min and\n Kim, Junyeob and\n Kim, Hyuhng Joon and\n Cho, Hyunsoo and\n Jo, Hwiyeol and\n Lee, Sang-Woo and\n Lee, Sang-goo and\n Kim, Taeuk\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.155/\",\n doi = \"10.18653/v1/2022.emnlp-main.155\",\n pages = 
\"2422--2437\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.155.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.155/", + "pdf_size": 1303650, + "gs_citation": 92, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17319458254197174500&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.547", + "title": "Grounded Keys-to-Text Generation: Towards Factual Open-Ended Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large pre-trained language models have recently enabled open-ended generation frameworks (e.g., prompt-to-text NLG) to tackle a variety of tasks going beyond the traditional data-to-text generation. While this framework is more general, it is under-specified and often leads to a lack of controllability restricting their real-world usage. We propose a new grounded keys-to-text generation task: the task is to generate a factual description about an entity given a set of guiding keys, and grounding passages. To address this task, we introduce a new dataset, called EntDeGen. Inspired by recent QA-based evaluation measures, we propose an automatic metric, MAFE, for factual correctness of generated descriptions. Our EntDescriptor model is equipped with strong rankers to fetch helpful passages and generate entity descriptions. Experimental result shows a good correlation (60.14) between our proposed metric and human judgments of factuality. Our rankers significantly improved the factual correctness of generated descriptions (15.95% and 34.51% relative gains in recall and precision). 
Finally, our ablation study highlights the benefit of combining keys and groundings.", + "author": "Faeze Brahman; Baolin Peng; Michel Galley; Sudha Rao; Bill Dolan; Snigdha Chaturvedi; Jianfeng Gao", + "authorids": "/f/faeze-brahman/; /b/baolin-peng/; /m/michel-galley/; /s/sudha-rao/; /w/william-b-dolan/; /s/snigdha-chaturvedi/; /j/jianfeng-gao/", + "bibtex": "@inproceedings{brahman-etal-2022-grounded,\n title = \"Grounded Keys-to-Text Generation: Towards Factual Open-Ended Generation\",\n author = \"Brahman, Faeze and\n Peng, Baolin and\n Galley, Michel and\n Rao, Sudha and\n Dolan, Bill and\n Chaturvedi, Snigdha and\n Gao, Jianfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.547/\",\n doi = \"10.18653/v1/2022.findings-emnlp.547\",\n pages = \"7397--7413\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.547.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.547/", + "pdf_size": 4278336, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10257618596458228699&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Allen Institute for Artificial Intelligence+Paul G. 
Allen School of Computer Science & Engineering, University of Washington; Microsoft Research; Microsoft Research; Microsoft Research; Microsoft Research; UNC Chapel Hill; Microsoft Research", + "aff_domain": "allenai.org; ; ; ; ; ; ", + "email": "allenai.org; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;2;2;2;3;2", + "aff_unique_norm": "Allen Institute for Artificial Intelligence;University of Washington;Microsoft Corporation;University of North Carolina at Chapel Hill", + "aff_unique_dep": ";Paul G. Allen School of Computer Science & Engineering;Microsoft Research;", + "aff_unique_url": "https://allenai.org;https://www.washington.edu;https://www.microsoft.com/en-us/research;https://www.unc.edu", + "aff_unique_abbr": "AI2;UW;MSR;UNC", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Seattle;Chapel Hill", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.263", + "title": "Group is better than individual: Exploiting Label Topologies and Label Relations for Joint Multiple Intent Detection and Slot Filling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent joint multiple intent detection and slot filling models employ label embeddings to achieve the semantics-label interactions.However, they treat all labels and label embeddings as uncorrelated individuals, ignoring the dependencies among them. 
Besides, they conduct the decoding for the two tasks independently, without leveraging the correlations between them.Therefore, in this paper, we first construct a Heterogeneous Label Graph (HLG) containing two kinds of topologies: (1) statistical dependencies based on labels\u2019 co-occurrence patterns and hierarchies in slot labels; (2) rich relations among the label nodes.Then we propose a novel model termed ReLa-Net.It can capture beneficial correlations among the labels from HLG.The label correlations are leveraged to enhance semantic-label interactions. Moreover, we also propose the label-aware inter-dependent decoding mechanism to further exploit the label correlations for decoding. Experiment results show that our ReLa-Net significantly outperforms previous models.Remarkably, ReLa-Net surpasses the previous best model by over 20% in terms of overall accuracy on MixATIS dataset.", + "author": "Bowen Xing; Ivor Tsang", + "authorids": "/b/bowen-xing/; /i/ivor-tsang/", + "bibtex": "@inproceedings{xing-tsang-2022-group,\n title = \"Group is better than individual: Exploiting Label Topologies and Label Relations for Joint Multiple Intent Detection and Slot Filling\",\n author = \"Xing, Bowen and\n Tsang, Ivor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.263/\",\n doi = \"10.18653/v1/2022.emnlp-main.263\",\n pages = \"3964--3975\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.263.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.263/", + "pdf_size": 3981903, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4124232576595291769&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + 
"aff": "Australian Artificial Intelligence Institute, University of Technology Sydney, Australia+Centre for Frontier Artificial Intelligence Research, A*STAR, Singapore; Centre for Frontier Artificial Intelligence Research, A*STAR, Singapore+Australian Artificial Intelligence Institute, University of Technology Sydney, Australia", + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1+0", + "aff_unique_norm": "University of Technology Sydney;A*STAR", + "aff_unique_dep": "Australian Artificial Intelligence Institute;Centre for Frontier Artificial Intelligence Research", + "aff_unique_url": "https://www.uts.edu.au;https://www.a-star.edu.sg", + "aff_unique_abbr": "UTS;A*STAR", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Sydney;", + "aff_country_unique_index": "0+1;1+0", + "aff_country_unique": "Australia;Singapore" + }, + { + "id": "2022.findings-emnlp.248", + "title": "Guiding Abstractive Dialogue Summarization with Content Planning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Abstractive dialogue summarization has recently been receiving more attention. We propose a coarse-to-fine model for generating abstractive dialogue summaries, and introduce a fact-aware reinforcement learning (RL) objective that improves the fact consistency between the dialogue and the generated summary. Initially, the model generates the predicate-argument spans of the dialogue, and then generates the final summary through a fact-aware RL objective. 
Extensive experiments and analysis on two benchmark datasets demonstrate that our proposed method effectively improves the quality of the generated summary, especially in coherence and consistency.", + "author": "Ye Wang; Xiaojun Wan; Zhiping Cai", + "authorids": "/y/ye-wang/; /x/xiaojun-wan/; /z/zhiping-cai/", + "bibtex": "@inproceedings{wang-etal-2022-guiding,\n title = \"Guiding Abstractive Dialogue Summarization with Content Planning\",\n author = \"Wang, Ye and\n Wan, Xiaojun and\n Cai, Zhiping\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.248/\",\n doi = \"10.18653/v1/2022.findings-emnlp.248\",\n pages = \"3408--3413\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.248.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.248/", + "pdf_size": 387058, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1828940097275725655&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "College of Computer, National University of Defense Technology; Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University; College of Computer, National University of Defense Technology", + "aff_domain": "nudt.edu.cn;pku.edu.cn;nudt.edu.cn", + "email": "nudt.edu.cn;pku.edu.cn;nudt.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+1+1;0", + "aff_unique_norm": "National University of Defense Technology;Peking University", + "aff_unique_dep": "College of Computer;Wangxuan Institute of Computer Technology", + "aff_unique_url": 
"http://www.nudt.edu.cn/;http://www.pku.edu.cn", + "aff_unique_abbr": "NUDT;PKU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.32", + "title": "Guiding Neural Entity Alignment with Compatibility", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entity Alignment (EA) aims to find equivalent entities between two Knowledge Graphs (KGs). While numerous neural EA models have been devised, they are mainly learned using labelled data only. In this work, we argue that different entities within one KG should have compatible counterparts in the other KG due to the potential dependencies among the entities. Making compatible predictions thus should be one of the goals of training an EA model along with fitting the labelled data: this aspect however is neglected in current methods. To power neural EA models with compatibility, we devise a training framework by addressing three problems: (1) how to measure the compatibility of an EA model; (2) how to inject the property of being compatible into an EA model; (3) how to optimise parameters of the compatibility model. Extensive experiments on widely-used datasets demonstrate the advantages of integrating compatibility within EA models. 
In fact, state-of-the-art neural EA models trained within our framework using just 5% of the labelled data can achieve comparable effectiveness with supervised training using 20% of the labelled data.", + "author": "Bing Liu; Harrisen Scells; Wen Hua; Guido Zuccon; Genghong Zhao; Xia Zhang", + "authorids": "/b/bing-liu/; /h/harrisen-scells/; /w/wen-hua/; /g/guido-zuccon/; /g/genghong-zhao/; /x/xia-zhang/", + "bibtex": "@inproceedings{liu-etal-2022-guiding,\n title = \"Guiding Neural Entity Alignment with Compatibility\",\n author = \"Liu, Bing and\n Scells, Harrisen and\n Hua, Wen and\n Zuccon, Guido and\n Zhao, Genghong and\n Zhang, Xia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.32/\",\n doi = \"10.18653/v1/2022.emnlp-main.32\",\n pages = \"491--504\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.32.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.32/", + "pdf_size": 564704, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16943106871226956868&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 7, + "aff": "The University of Queensland, Australia; The University of Queensland, Australia; The University of Queensland, Australia; The University of Queensland, Australia; Neusoft Research of Intelligent Healthcare Technology, Co. 
Ltd., China; Neusoft Corporation, China", + "aff_domain": "uq.edu.au;uq.edu.au;uq.edu.au;uq.edu.au;neusoft.com;neusoft.com", + "email": "uq.edu.au;uq.edu.au;uq.edu.au;uq.edu.au;neusoft.com;neusoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;2", + "aff_unique_norm": "The University of Queensland;Neusoft Research of Intelligent Healthcare Technology;Neusoft Corporation", + "aff_unique_dep": ";Intelligent Healthcare Technology;", + "aff_unique_url": "https://www.uq.edu.au;;http://www.neusoft.com", + "aff_unique_abbr": "UQ;;Neusoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;1", + "aff_country_unique": "Australia;China" + }, + { + "id": "2022.findings-emnlp.541", + "title": "Guiding Neural Machine Translation with Semantic Kernels", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Machine Translation task has made great progress with the help of auto-regressive decoding paradigm and Transformer architecture. In this paradigm, though the encoder can obtain global source representations, the decoder can only use translation history to determine the current word. Previous promising works attempted to address this issue by applying a draft or a fixed-length semantic embedding as target-side global information. However, these methods either degrade model efficiency or show limitations in expressing semantics. Motivated by Functional Equivalence Theory, we extract several semantic kernels from a source sentence, each of which can express one semantic segment of the original sentence. Together, these semantic kernels can capture global semantic information, and we project them into target embedding space to guide target sentence generation. We further force our model to use semantic kernels at each decoding step through an adaptive mask algorithm. 
Empirical studies on various machine translation benchmarks show that our approach gains approximately an improvement of 1 BLEU score on most benchmarks over the Transformer baseline and about 1.7 times faster than previous works on average at inference time.", + "author": "Ping Guo; Yue Hu; Xiangpeng Wei; Yubing Ren; Yunpeng Li; Luxi Xing; Yuqiang Xie", + "authorids": "/p/ping-guo/; /y/yue-hu/; /x/xiangpeng-wei/; /y/yubing-ren/; /y/yunpeng-li/; /l/luxi-xing/; /y/yuqiang-xie/", + "bibtex": "@inproceedings{guo-etal-2022-guiding,\n title = \"Guiding Neural Machine Translation with Semantic Kernels\",\n author = \"Guo, Ping and\n Hu, Yue and\n Wei, Xiangpeng and\n Ren, Yubing and\n Li, Yunpeng and\n Xing, Luxi and\n Xie, Yuqiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.541/\",\n doi = \"10.18653/v1/2022.findings-emnlp.541\",\n pages = \"7316--7327\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.541.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.541/", + "pdf_size": 853922, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12950032221022274698&as_sdt=1005&sciodt=0,4&hl=en", + "gs_version_total": 2, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Alibaba DAMO Academy, Hangzhou, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University 
of Chinese Academy of Sciences, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China; Alibaba Group, Beijing, China; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China+School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn;iie.ac.cn;gmail.com;iie.ac.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;2;0+1;0+1;3;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Alibaba DAMO Academy;Alibaba Group", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://damo.alibaba.com;https://www.alibaba.com", + "aff_unique_abbr": "CAS;UCAS;Alibaba DAMO;Alibaba", + "aff_campus_unique_index": "0+0;0+0;1;0+0;0+0;0;0+0", + "aff_campus_unique": "Beijing;Hangzhou", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.526", + "title": "Guiding Neural Story Generation with Reader Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automated storytelling has long captured the attention of researchers for the ubiquity of narratives in everyday life. However, it is challenging to maintain coherence and stay on-topictoward a specific ending when generating narratives with neural language models. In this paper, we introduce Story generation with ReaderModels (StoRM), a framework in which areader model is used to reason about the storyshould progress. A reader model infers whata human reader believes about the concepts,entities, and relations about the fictional storyworld. 
We show how an explicit reader modelrepresented as a knowledge graph affords the storycoherence and provides controllability in theform of achieving a given story world stategoal. Experiments show that our model produces significantly more coherent and on-topicstories, outperforming baselines in dimensionsincluding plot plausibility and staying on topic", + "author": "Xiangyu Peng; Kaige Xie; Amal Alabdulkarim; Harshith Kayam; Samihan Dani; Mark Riedl", + "authorids": "/x/xiangyu-peng/; /k/kaige-xie/; /a/amal-alabdulkarim/; /h/harshith-kayam/; /s/samihan-dani/; /m/mark-riedl/", + "bibtex": "@inproceedings{peng-etal-2022-guiding,\n title = \"Guiding Neural Story Generation with Reader Models\",\n author = \"Peng, Xiangyu and\n Xie, Kaige and\n Alabdulkarim, Amal and\n Kayam, Harshith and\n Dani, Samihan and\n Riedl, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.526/\",\n doi = \"10.18653/v1/2022.findings-emnlp.526\",\n pages = \"7087--7111\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.526.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.526/", + "pdf_size": 2040661, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12892222711067237086&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology", + "aff_domain": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu", + "email": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu", + "github": 
"https://github.com/xiangyu-peng/Reader_Model", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.774", + "title": "GuoFeng: A Benchmark for Zero Pronoun Recovery and Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The phenomenon of zero pronoun (ZP) has attracted increasing interest in the machine translation (MT) community due to its importance and difficulty. However, previous studies generally evaluate the quality of translating ZPs with BLEU scores on MT testsets, which is not expressive or sensitive enough for accurate assessment. To bridge the data and evaluation gaps, we propose a benchmark testset for target evaluation on Chinese-English ZP translation. The human-annotated testset covers five challenging genres, which reveal different characteristics of ZPs for comprehensive evaluation. We systematically revisit eight advanced models on ZP translation and identify current challenges for future exploration. We release data, code, models and annotation guidelines, which we hope can significantly promote research in this field (https://github.com/longyuewangdcu/mZPRT).", + "author": "Mingzhou Xu; Longyue Wang; Derek F. Wong; Hongye Liu; Linfeng Song; Lidia S. Chao; Shuming Shi; Zhaopeng Tu", + "authorids": "/m/mingzhou-xu/; /l/longyue-wang/; /d/derek-f-wong/; /h/hongye-liu/; /l/linfeng-song/; /l/lidia-s-chao/; /s/shuming-shi/; /z/zhaopeng-tu/", + "bibtex": "@inproceedings{xu-etal-2022-guofeng,\n title = \"{G}uo{F}eng: A Benchmark for Zero Pronoun Recovery and Translation\",\n author = \"Xu, Mingzhou and\n Wang, Longyue and\n Wong, Derek F. 
and\n Liu, Hongye and\n Song, Linfeng and\n Chao, Lidia S. and\n Shi, Shuming and\n Tu, Zhaopeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.774/\",\n doi = \"10.18653/v1/2022.emnlp-main.774\",\n pages = \"11266--11278\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.774.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.774/", + "pdf_size": 656571, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6338509033075347778&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Tencent AI Lab; University of Macau; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; University of Macau; Tencent AI Lab; Tencent AI Lab", + "aff_domain": "tencent.com;gmail.com;um.edu.com;tencent.com;tencent.com;um.edu.com;tencent.com;tencent.com", + "email": "tencent.com;gmail.com;um.edu.com;tencent.com;tencent.com;um.edu.com;tencent.com;tencent.com", + "github": "https://github.com/longyuewangdcu/mZPRT", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;0;1;0;0", + "aff_unique_norm": "Tencent;University of Macau", + "aff_unique_dep": "Tencent AI Lab;", + "aff_unique_url": "https://ai.tencent.com;https://www.um.edu.mo", + "aff_unique_abbr": "Tencent AI Lab;UM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;0;1;0;0", + "aff_country_unique": "China;Macau" + }, + { + "id": "2022.findings-emnlp.165", + "title": "HARALD: Augmenting Hate Speech Data Sets with Real Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The successful completion of the hate speech detection task hinges upon the availability of rich and 
variable labeled data, which is hard to obtain. In this work, we present a new approach for data augmentation that uses as input real unlabelled data, which is carefully selected from online platforms where invited hate speech is abundant. We show that by harvesting and processing this data (in an automatic manner), one can augment existing manually-labeled datasets to improve the classification performance of hate speech classification models. We observed an improvement in F1-score ranging from 2.7% and up to 9.5%, depending on the task (in- or cross-domain) and the model used.", + "author": "Tal Ilan; Dan Vilenchik", + "authorids": "/t/tal-ilan/; /d/dan-vilenchik/", + "bibtex": "@inproceedings{ilan-vilenchik-2022-harald,\n title = \"{HARALD}: Augmenting Hate Speech Data Sets with Real Data\",\n author = \"Ilan, Tal and\n Vilenchik, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.165/\",\n doi = \"10.18653/v1/2022.findings-emnlp.165\",\n pages = \"2241--2248\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.165.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.165/", + "pdf_size": 3339084, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10596236562611910279&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Department of Ind. Eng. Manag., Ben-Gurion University of the Negev; School of Comput. Electr. 
Eng., Ben-Gurion University of the Negev", + "aff_domain": "gmail.com;bgu.ac.il", + "email": "gmail.com;bgu.ac.il", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ben-Gurion University of the Negev", + "aff_unique_dep": "Department of Industrial Engineering and Management", + "aff_unique_url": "https://in.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.findings-emnlp.130", + "title": "HCL-TAT: A Hybrid Contrastive Learning Method for Few-shot Event Detection with Task-Adaptive Threshold", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Event detection has been suffering from constantly emerging event types with lack of sufficient data. Existing works formulate the new problem as few-shot event detection (FSED), and employ two-stage or unified models based on meta-learning to address the problem. However, these methods fall far short of expectations due to: (i) insufficient learning of discriminative representations in low-resource scenarios, and (ii) representation overlap between triggers and non-triggers. To resolve the above issues, in this paper, we propose a novel Hybrid Contrastive Learning method with a Task-Adaptive Threshold (abbreviated as HCL-TAT), which enables discriminative representation learning with a two-view contrastive loss (support-support and prototype-query), and devises an easily-adapted threshold to alleviate misidentification of triggers. Extensive experiments on the benchmark dataset FewEvent demonstrate the superiority of our method to achieve better results compared to the state-of-the-arts. 
All the data and codes will be available to facilitate future research.", + "author": "Ruihan Zhang; Wei Wei; Xian-Ling Mao; Rui Fang; Dangyang Chen", + "authorids": "/r/ruihan-zhang/; /w/wei-wei/; /x/xian-ling-mao/; /r/rui-fang/; /d/dangyang-chen/", + "bibtex": "@inproceedings{zhang-etal-2022-hcl,\n title = \"{HCL}-{TAT}: A Hybrid Contrastive Learning Method for Few-shot Event Detection with Task-Adaptive Threshold\",\n author = \"Zhang, Ruihan and\n Wei, Wei and\n Mao, Xian-Ling and\n Fang, Rui and\n Chen, Dangyang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.130/\",\n doi = \"10.18653/v1/2022.findings-emnlp.130\",\n pages = \"1808--1819\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.130.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.130/", + "pdf_size": 577934, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3904871011819575971&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Department of Computer Science and Technology, Beijing Institute of Technology; Ping An Property & Casualty Insurance company of China, Ltd; Ping An Property & Casualty Insurance company of China, Ltd", + "aff_domain": 
"hust.edu.cn;hust.edu.cn;bit.edu.cn;pingan.com.cn;pingan.com.cn", + "email": "hust.edu.cn;hust.edu.cn;bit.edu.cn;pingan.com.cn;pingan.com.cn", + "github": "https://github.com/CCIIPLab/HCL-TAT", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;1;2;2", + "aff_unique_norm": "Huazhong University of Science and Technology;Beijing Institute of Technology;Ping An Property & Casualty Insurance Company of China, Ltd", + "aff_unique_dep": "School of Computer Science and Technology;Department of Computer Science and Technology;", + "aff_unique_url": ";http://www.bit.edu.cn/;https://www.pingan.com", + "aff_unique_abbr": ";BIT;Ping An", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.692", + "title": "HEGEL: Hypergraph Transformer for Long Document Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Extractive summarization for long documents is challenging due to the extended structured input context. The long-distance sentence dependency hinders cross-sentence relations modeling, the critical step of extractive summarization. This paper proposes HEGEL, a hypergraph neural network for long document summarization by capturing high-order cross-sentence relations. HEGEL updates and learns effective sentence representations with hypergraph transformer layers and fuses different types of sentence dependencies, including latent topics, keywords coreference, and section structure. 
We validate HEGEL by conducting extensive experiments on two benchmark datasets, and experimental results demonstrate the effectiveness and efficiency of HEGEL.", + "author": "Haopeng Zhang; Xiao Liu; Jiawei Zhang", + "authorids": "/h/haopeng-zhang/; /x/xiao-liu/; /j/jiawei-zhang/", + "bibtex": "@inproceedings{zhang-etal-2022-hegel,\n title = \"{HEGEL}: Hypergraph Transformer for Long Document Summarization\",\n author = \"Zhang, Haopeng and\n Liu, Xiao and\n Zhang, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.692/\",\n doi = \"10.18653/v1/2022.emnlp-main.692\",\n pages = \"10167--10176\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.692.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.692/", + "pdf_size": 1328147, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6474046315723000700&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "IFM Lab, Department of Computer Science, University of California, Davis, CA, USA; IFM Lab, Department of Computer Science, University of California, Davis, CA, USA; IFM Lab, Department of Computer Science, University of California, Davis, CA, USA", + "aff_domain": "ifmlab.org;ifmlab.org;ifmlab.org", + "email": "ifmlab.org;ifmlab.org;ifmlab.org", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, Davis", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ucdavis.edu", + "aff_unique_abbr": "UC Davis", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Davis", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.246", + "title": "HPT: Hierarchy-aware Prompt Tuning for Hierarchical Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Hierarchical text classification (HTC) is a challenging subtask of multi-label classification due to its complex label hierarchy.Recently, the pretrained language models (PLM)have been widely adopted in HTC through a fine-tuning paradigm. However, in this paradigm, there exists a huge gap between the classification tasks with sophisticated label hierarchy and the masked language model (MLM) pretraining tasks of PLMs and thus the potential of PLMs cannot be fully tapped.To bridge the gap, in this paper, we propose HPT, a Hierarchy-aware Prompt Tuning method to handle HTC from a multi-label MLM perspective.Specifically, we construct a dynamic virtual template and label words that take the form of soft prompts to fuse the label hierarchy knowledge and introduce a zero-bounded multi-label cross-entropy loss to harmonize the objectives of HTC and MLM.Extensive experiments show HPT achieves state-of-the-art performances on 3 popular HTC datasets and is adept at handling the imbalance and low resource situations. 
Our code is available at https://github.com/wzh9969/HPT.", + "author": "Zihan Wang; Peiyi Wang; Tianyu Liu; Binghuai Lin; Yunbo Cao; Zhifang Sui; Houfeng Wang", + "authorids": "/z/zihan-wang/; /p/peiyi-wang/; /t/tianyu-liu/; /b/binghuai-lin/; /y/yunbo-cao/; /z/zhifang-sui/; /h/houfeng-wang/", + "bibtex": "@inproceedings{wang-etal-2022-hpt,\n title = \"{HPT}: Hierarchy-aware Prompt Tuning for Hierarchical Text Classification\",\n author = \"Wang, Zihan and\n Wang, Peiyi and\n Liu, Tianyu and\n Lin, Binghuai and\n Cao, Yunbo and\n Sui, Zhifang and\n Wang, Houfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.246/\",\n doi = \"10.18653/v1/2022.emnlp-main.246\",\n pages = \"3740--3751\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.246.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.246/", + "pdf_size": 652260, + "gs_citation": 63, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=95963342310544364&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China", + "aff_domain": "gmail.com;gmail.com;tencent.com;tencent.com;tencent.com;pku.edu.cn;pku.edu.cn", + "email": "gmail.com;gmail.com;tencent.com;tencent.com;tencent.com;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/wzh9969/HPT", + "project": "", + "author_num": 7, + 
"aff_unique_index": "0;0;1;1;1;0;0", + "aff_unique_norm": "Peking University;Tencent", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics;Tencent Cloud Xiaowei", + "aff_unique_url": "http://www.pku.edu.cn;https://cloud.tencent.com", + "aff_unique_abbr": "PKU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.35", + "title": "Handling and Presenting Harmful Text in NLP Research", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text data can pose a risk of harm. However, the risks are not fully understood, and how to handle, present, and discuss harmful text in a safe way remains an unresolved issue in the NLP community. We provide an analytical framework categorising harms on three axes: (1) the harm type (e.g., misinformation, hate speech or racial stereotypes); (2) whether a harm is sought as a feature of the research design if explicitly studying harmful content (e.g., training a hate speech classifier), versus unsought if harmful content is encountered when working on unrelated problems (e.g., language generation or part-of-speech tagging); and (3) who it affects, from people (mis)represented in the data to those handling the data and those publishing on the data. We provide advice for practitioners, with concrete steps for mitigating harm in research and in publication. 
To assist implementation we introduce HarmCheck \u2013 a documentation standard for handling and presenting harmful text in research.", + "author": "Hannah Kirk; Abeba Birhane; Bertie Vidgen; Leon Derczynski", + "authorids": "/h/hannah-kirk/; /a/abeba-birhane/; /b/bertie-vidgen/; /l/leon-derczynski/", + "bibtex": "@inproceedings{kirk-etal-2022-handling,\n title = \"Handling and Presenting Harmful Text in {NLP} Research\",\n author = \"Kirk, Hannah and\n Birhane, Abeba and\n Vidgen, Bertie and\n Derczynski, Leon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.35/\",\n doi = \"10.18653/v1/2022.findings-emnlp.35\",\n pages = \"497--510\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.35.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.35/", + "pdf_size": 2201177, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11972921587765870038&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "University of Oxford / The Alan Turing Institute; Mozilla Foundation / University College Dublin; The Alan Turing Institute; IT University of Copenhagen", + "aff_domain": "oii.ox.ac.uk;mozillafoundation.org;turing.ac.uk;itu.dk", + "email": "oii.ox.ac.uk;mozillafoundation.org;turing.ac.uk;itu.dk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Oxford;Mozilla Foundation;The Alan Turing Institute;IT University of Copenhagen", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ox.ac.uk;https://www.mozilla.org;https://www.turing.ac.uk;https://itu.dk", + "aff_unique_abbr": "Oxford;Mozilla;ATI;ITU", + "aff_campus_unique_index": 
"", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2", + "aff_country_unique": "United Kingdom;United States;Denmark" + }, + { + "id": "2022.emnlp-main.665", + "title": "Hard Gate Knowledge Distillation - Leverage Calibration for Robust and Reliable Language Model", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In knowledge distillation, a student model is trained with supervisions from both knowledge from a teacher and observations drawn from a training data distribution. Knowledge of a teacher is considered a subject that holds inter-class relations which send a meaningful supervision to a student; hence, much effort has been put to find such knowledge to be distilled. In this paper, we explore a question that has been given little attention: \u201cwhen to distill such knowledge.\u201d The question is answered in our work with the concept of model calibration; we view a teacher model not only as a source of knowledge but also as a gauge to detect miscalibration of a student. This simple and yet novel view leads to a hard gate knowledge distillation scheme that switches between learning from a teacher model and training data. We verify the gating mechanism in the context of natural language generation at both the token-level and the sentence-level. 
Empirical comparisons with strong baselines show that hard gate knowledge distillation not only improves model generalization, but also significantly lowers model calibration error.", + "author": "Dongkyu Lee; Zhiliang Tian; Yingxiu Zhao; Ka Chun Cheung; Nevin Zhang", + "authorids": "/d/dongkyu-lee/; /z/zhiliang-tian/; /y/yingxiu-zhao/; /k/ka-chun-cheung/; /n/nevin-zhang/", + "bibtex": "@inproceedings{lee-etal-2022-hard,\n title = \"Hard Gate Knowledge Distillation - Leverage Calibration for Robust and Reliable Language Model\",\n author = \"Lee, Dongkyu and\n Tian, Zhiliang and\n Zhao, Yingxiu and\n Cheung, Ka Chun and\n Zhang, Nevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.665/\",\n doi = \"10.18653/v1/2022.emnlp-main.665\",\n pages = \"9793--9803\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.665.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.665/", + "pdf_size": 757837, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10637339470037843040&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, HKUST + NVIDIA AI Technology Center, NVIDIA; College of Computer, National University of Defense Technology; Department of Computer Science and Engineering, HKUST; NVIDIA AI Technology Center, NVIDIA; Department of Computer Science and Engineering, HKUST", + "aff_domain": "cse.ust.hk;cse.ust.hk;cse.ust.hk;gmail.com;nvidia.com", + "email": "cse.ust.hk;cse.ust.hk;cse.ust.hk;gmail.com;nvidia.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;0;1;0", + "aff_unique_norm": "Hong Kong University of 
Science and Technology;NVIDIA;National University of Defense Technology", + "aff_unique_dep": "Department of Computer Science and Engineering;NVIDIA AI Technology Center;College of Computer", + "aff_unique_url": "https://www.hkust.edu.hk;https://www.nvidia.com;http://www.nudt.edu.cn/", + "aff_unique_abbr": "HKUST;NVIDIA;NUDT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.271", + "title": "Hardness-guided domain adaptation to recognise biomedical named entities under low-resource scenarios", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Domain adaptation is an effective solution to data scarcity in low-resource scenarios. However, when applied to token-level tasks such as bioNER, domain adaptation methods often suffer from the challenging linguistic characteristics that clinical narratives possess, which leads to unsatsifactory performance. In this paper, we present a simple yet effective hardness-guided domain adaptation framework for bioNER tasks that can effectively leverage the domain hardness information to improve the adaptability of the learnt model in the low-resource scenarios. 
Experimental results on biomedical datasets show that our model can achieve significant performance improvement over the recently published state-of-the-art (SOTA) MetaNER model.", + "author": "Ngoc Dang Nguyen; Lan Du; Wray Buntine; Changyou Chen; Richard Beare", + "authorids": "/n/ngoc-dang-nguyen/; /l/lan-du/; /w/wray-buntine/; /c/changyou-chen/; /r/richard-beare/", + "bibtex": "@inproceedings{nguyen-etal-2022-hardness,\n title = \"Hardness-guided domain adaptation to recognise biomedical named entities under low-resource scenarios\",\n author = \"Nguyen, Ngoc Dang and\n Du, Lan and\n Buntine, Wray and\n Chen, Changyou and\n Beare, Richard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.271/\",\n doi = \"10.18653/v1/2022.emnlp-main.271\",\n pages = \"4063--4071\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.271.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.271/", + "pdf_size": 309558, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12598173769872065422&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Monash University; Monash University; VinUniversity; University at Buffalo; Monash University", + "aff_domain": "monash.edu;monash.edu;vinuni.edu.vn;buffalo.edu;monash.edu", + "email": "monash.edu;monash.edu;vinuni.edu.vn;buffalo.edu;monash.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Monash University;VinUniversity;University at Buffalo", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.monash.edu;https://vinuni.edu.vn;https://www.buffalo.edu", + "aff_unique_abbr": "Monash;VinUni;UB", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;2;0", + "aff_country_unique": "Australia;Vietnam;United States" + }, + { + "id": "2022.emnlp-main.536", + "title": "HashFormers: Towards Vocabulary-independent Pre-trained Transformers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based pre-trained language models are vocabulary-dependent, mapping by default each token to its corresponding embedding. This one-to-one mapping results into embedding matrices that occupy a lot of memory (i.e. millions of parameters) and grow linearly with the size of the vocabulary. Previous work on on-device transformers dynamically generate token embeddings on-the-fly without embedding matrices using locality-sensitive hashing over morphological information. These embeddings are subsequently fed into transformer layers for text classification. However, these methods are not pre-trained. Inspired by this line of work, we propose HashFormers, a new family of vocabulary-independent pre-trained transformers that support an unlimited vocabulary (i.e. all possible tokens in a corpus) given a substantially smaller fixed-sized embedding matrix. We achieve this by first introducing computationally cheap hashing functions that bucket together individual tokens to embeddings. We also propose three variants that do not require an embedding matrix at all, further reducing the memory requirements. We empirically demonstrate that HashFormers are more memory efficient compared to standard pre-trained transformers while achieving comparable predictive performance when fine-tuned on multiple text classification tasks. 
For example, our most efficient HashFormer variant has a negligible performance degradation (0.4% on GLUE) using only 99.1K parameters for representing the embeddings compared to 12.3-38M parameters of state-of-the-art models.", + "author": "Huiyin Xue; Nikolaos Aletras", + "authorids": "/h/huiyin-xue/; /n/nikolaos-aletras/", + "bibtex": "@inproceedings{xue-aletras-2022-hashformers,\n title = \"{H}ash{F}ormers: Towards Vocabulary-independent Pre-trained Transformers\",\n author = \"Xue, Huiyin and\n Aletras, Nikolaos\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.536/\",\n doi = \"10.18653/v1/2022.emnlp-main.536\",\n pages = \"7862--7874\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.536.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.536/", + "pdf_size": 580521, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14195447106072658396&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, University of Sheffield; Department of Computer Science, University of Sheffield", + "aff_domain": "sheffield.ac.uk;sheffield.ac.uk", + "email": "sheffield.ac.uk;sheffield.ac.uk", + "github": "https://github.com/HUIYINXUE/hashformer", + "project": "https://huggingface.co/klein9692", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Sheffield", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.sheffield.ac.uk", + "aff_unique_abbr": "Sheffield", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + 
"id": "2022.findings-emnlp.355", + "title": "He Said, She Said: Style Transfer for Shifting the Perspective of Dialogues", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this work, we define a new style transfer task: perspective shift, which reframes a dialouge from informal first person to a formal third person rephrasing of the text. This task requires challenging coreference resolution, emotion attribution, and interpretation of informal text. We explore several baseline approaches and discuss further directions on this task when applied to short dialogues. As a sample application, we demonstrate that applying perspective shifting to a dialogue summarization dataset (SAMSum) substantially improves the zero-shot performance of extractive news summarization models on this data. Additionally, supervised extractive models perform better when trained on perspective shifted data than on the original dialogues. We release our code publicly.", + "author": "Amanda Bertsch; Graham Neubig; Matthew R. 
Gormley", + "authorids": "/a/amanda-bertsch/; /g/graham-neubig/; /m/matthew-r-gormley/", + "bibtex": "@inproceedings{bertsch-etal-2022-said,\n title = \"He Said, She Said: Style Transfer for Shifting the Perspective of Dialogues\",\n author = \"Bertsch, Amanda and\n Neubig, Graham and\n Gormley, Matthew R.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.355/\",\n doi = \"10.18653/v1/2022.findings-emnlp.355\",\n pages = \"4823--4840\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.355.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.355/", + "pdf_size": 218980, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18262002269109692575&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/abertsch72/perspective-shifting", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.367", + "title": "HeLo: Learning-Free Lookahead Decoding for Conversation Infilling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose Heuristic Guided Lookahead Decoding (HeLo), a novel decoding strategy for conversation infilling. 
Conversation infilling aims to generate a seamless bridge of utterances connecting a given pair of source and target utterances. HeLo does not require fine-tuning or extra models \u2013 only the generating model itself. Instead, HeLo leverages a greedy lookahead phase before committing to any token. The HeLo framework is simple and can augment conventional decoding strategies paired with any autoregressive language model. Smooth transitions between utterances are encouraged with an annealing schedule. Our experiments show HeLo outperforms several baselines when evaluated with both automatic and human evaluation metrics, which, we argue, are appropriate for the task.", + "author": "Ivan Lee; Taylor Berg-Kirkpatrick", + "authorids": "/i/ivan-lee/; /t/taylor-berg-kirkpatrick/", + "bibtex": "@inproceedings{lee-berg-kirkpatrick-2022-helo,\n title = \"{H}e{L}o: Learning-Free Lookahead Decoding for Conversation Infilling\",\n author = \"Lee, Ivan and\n Berg-Kirkpatrick, Taylor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.367/\",\n doi = \"10.18653/v1/2022.findings-emnlp.367\",\n pages = \"4996--5008\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.367.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.367/", + "pdf_size": 370492, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1138849993498395489&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "UC San Diego; UC San Diego", + "aff_domain": "ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu", + "github": "https://github.com/ivnle/helo", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University 
of California, San Diego", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.460", + "title": "Help me write a poem: Instruction Tuning as a Vehicle for Collaborative Poetry Writing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work in training large language models (LLMs) to follow natural language instructions has opened up exciting opportunities for natural language interface design. Building on the prior success of large language models in the realm of computer assisted creativity, in this work, we present CoPoet, a collaborative poetry writing system, with the goal of to study if LLM\u2019s actually improve the quality of the generated content. In contrast to auto-completing a user\u2019s text, CoPoet is controlled by user instructions that specify the attributes of the desired text, such as Write a sentence about \u2018love\u2019 or Write a sentence ending in \u2018fly\u2019. The core component of our system is a language model fine-tuned on a diverse collection of instructions for poetry writing. Our model is not only competitive to publicly available LLMs trained on instructions (InstructGPT), but also capable of satisfying unseen compositional instructions. 
A study with 15 qualified crowdworkers shows that users successfully write poems with CoPoet on diverse topics ranging from Monarchy to Climate change, which are preferred by third-party evaluators over poems written without the system.", + "author": "Tuhin Chakrabarty; Vishakh Padmakumar; He He", + "authorids": "/t/tuhin-chakrabarty/; /v/vishakh-padmakumar/; /h/he-he/", + "bibtex": "@inproceedings{chakrabarty-etal-2022-help,\n title = \"\\textit{Help me write a poem}: Instruction Tuning as a Vehicle for Collaborative Poetry Writing\",\n author = \"Chakrabarty, Tuhin and\n Padmakumar, Vishakh and\n He, He\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.460/\",\n doi = \"10.18653/v1/2022.emnlp-main.460\",\n pages = \"6848--6863\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.460.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.460/", + "pdf_size": 1139523, + "gs_citation": 74, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7154986204460191129&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, Columbia University; Center for Data Science, New York University + Department of Computer Science, New York University; Center for Data Science, New York University + Department of Computer Science, New York University", + "aff_domain": "cs.columbia.edu;nyu.edu;nyu.edu", + "email": "cs.columbia.edu;nyu.edu;nyu.edu", + "github": "https://github.com/vishakhpk/creative-instructions", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+1;1+1", + "aff_unique_norm": "Columbia University;New York University", + "aff_unique_dep": "Department of Computer Science;Center 
for Data Science", + "aff_unique_url": "https://www.columbia.edu;https://www.nyu.edu", + "aff_unique_abbr": "Columbia;NYU", + "aff_campus_unique_index": "1+1;1+1", + "aff_campus_unique": ";New York", + "aff_country_unique_index": "0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.371", + "title": "Helping the Weak Makes You Strong: Simple Multi-Task Learning Improves Non-Autoregressive Translators", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, non-autoregressive (NAR) neural machine translation models have received increasing attention due to their efficient parallel decoding.However, the probabilistic framework of NAR models necessitates conditional independence assumption on target sequences, falling short of characterizing human language data.This drawback results in less informative learning signals for NAR models under conventional MLE training, thereby yielding unsatisfactory accuracy compared to their autoregressive (AR) counterparts.In this paper, we propose a simple and model-agnostic multi-task learning framework to provide more informative learning signals.During training stage, we introduce a set of sufficiently weak AR decoders that solely rely on the information provided by NAR decoder to make prediction, forcing the NAR decoder to become stronger or else it will be unable to support its weak AR partners.Experiments on WMT and IWSLT datasets show that our approach can consistently improve accuracy of multiple NAR baselines without adding any additional decoding overhead.", + "author": "Xinyou Wang; Zaixiang Zheng; Shujian Huang", + "authorids": "/x/xinyou-wang/; /z/zaixiang-zheng/; /s/shujian-huang/", + "bibtex": "@inproceedings{wang-etal-2022-helping,\n title = \"Helping the Weak Makes You Strong: Simple Multi-Task Learning Improves Non-Autoregressive Translators\",\n author = \"Wang, Xinyou and\n Zheng, Zaixiang and\n Huang, Shujian\",\n editor = \"Goldberg, Yoav and\n 
Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.371/\",\n doi = \"10.18653/v1/2022.emnlp-main.371\",\n pages = \"5513--5519\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.371.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.371/", + "pdf_size": 322396, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13823984207795337486&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "\u2660National Key Laboratory for Novel Software Technology, Nanjing University + \u2661ByteDance AI Lab; \u2661ByteDance AI Lab; \u2660National Key Laboratory for Novel Software Technology, Nanjing University", + "aff_domain": "smail.nju.edu.cn;bytedance.com;nju.edu.cn", + "email": "smail.nju.edu.cn;bytedance.com;nju.edu.cn", + "github": "https://github.com/wxy-nlp/MultiTaskNAT", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": "Nanjing University;ByteDance", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;AI Lab", + "aff_unique_url": "http://www.nju.edu.cn;https://www.bytedance.com", + "aff_unique_abbr": "Nanjing University;ByteDance", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.542", + "title": "HiSMatch: Historical Structure Matching based Temporal Knowledge Graph Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A Temporal Knowledge Graph (TKG) is a sequence of KGs with respective timestamps, which adopts quadruples in the form of (subject, relation, object, timestamp) to describe dynamic facts. 
TKG reasoning has facilitated many real-world applications via answering such queries as (query entity, query relation, ?, future timestamp) about future. This is actually a matching task between a query and candidate entities based on their historical structures, which reflect behavioral trends of the entities at different timestamps. In addition, recent KGs provide background knowledge of all the entities, which is also helpful for the matching. Thus, in this paper, we propose the Historical Structure Matching (HiSMatch) model. It applies two structure encoders to capture the semantic information contained in the historical structures of the query and candidate entities. Besides, it adopts another encoder to integrate the background knowledge into the model. TKG reasoning experiments on six benchmark datasets demonstrate the significant improvement of the proposed HiSMatch model, with up to 5.6% performance improvement in MRR, compared to the state-of-the-art baselines.", + "author": "Zixuan Li; Zhongni Hou; Saiping Guan; Xiaolong Jin; Weihua Peng; Long Bai; Yajuan Lyu; Wei Li; Jiafeng Guo; Xueqi Cheng", + "authorids": "/z/zixuan-li/; /z/zhongni-hou/; /s/saiping-guan/; /x/xiaolong-jin/; /w/weihua-peng/; /l/long-bai/; /y/yajuan-lyu/; /w/wei-li/; /j/jiafeng-guo/; /x/xueqi-cheng/", + "bibtex": "@inproceedings{li-etal-2022-hismatch,\n title = \"{H}i{SM}atch: Historical Structure Matching based Temporal Knowledge Graph Reasoning\",\n author = \"Li, Zixuan and\n Hou, Zhongni and\n Guan, Saiping and\n Jin, Xiaolong and\n Peng, Weihua and\n Bai, Long and\n Lyu, Yajuan and\n Li, Wei and\n Guo, Jiafeng and\n Cheng, Xueqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.findings-emnlp.542/\",\n doi = \"10.18653/v1/2022.findings-emnlp.542\",\n pages = \"7328--7338\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.542.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.542/", + "pdf_size": 1709161, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2915170116141211098&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences + Baidu Inc.; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences\u2020; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences; Baidu Inc.; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences; Baidu Inc.; Baidu Inc.; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences; School of Computer Science and Technology, University of Chinese Academy of Sciences + CAS Key Laboratory of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences", + "aff_domain": 
"ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;baidu.com;ict.ac.cn;baidu.com;baidu.com;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;baidu.com;ict.ac.cn;baidu.com;baidu.com;ict.ac.cn;ict.ac.cn", + "github": "https://github.com/Lee-zix/HiSMatch", + "project": "", + "author_num": 10, + "aff_unique_index": "0+1+2;0+1;0+1;0+1;2;0+1;2;2;0+1;0+1", + "aff_unique_norm": "University of Chinese Academy of Sciences;Chinese Academy of Sciences;Baidu Inc.", + "aff_unique_dep": "School of Computer Science and Technology;Institute of Computing Technology;", + "aff_unique_url": "http://www.ucas.ac.cn;http://www.cas.cn;https://www.baidu.com", + "aff_unique_abbr": "UCAS;CAS;Baidu", + "aff_campus_unique_index": ";;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0+0;0+0;0;0+0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.422", + "title": "Hidden State Variability of Pretrained Language Models Can Guide Computation Reduction for Transfer Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While transferring a pretrained language model, common approaches conventionally attach their task-specific classifiers to the top layer and adapt all the pretrained layers. We investigate whether one could make a task-specific selection on which subset of the layers to adapt and where to place the classifier. The goal is to reduce the computation cost of transfer learning methods (e.g. fine-tuning or adapter-tuning) without sacrificing its performance.We propose to select layers based on the variability of their hidden states given a task-specific corpus. We say a layer is already \u201cwell-specialized\u201d in a task if the within-class variability of its hidden states is low relative to the between-class variability. Our variability metric is cheap to compute and doesn\u2019t need any training or hyperparameter tuning. It is robust to data imbalance and data scarcity. 
Extensive experiments on the GLUE benchmark demonstrate that selecting layers based on our metric can yield significantly stronger performance than using the same number of top layers and often match the performance of fine-tuning or adapter-tuning the entire language model.", + "author": "Shuo Xie; Jiahao Qiu; Ankita Pasad; Li Du; Qing Qu; Hongyuan Mei", + "authorids": "/s/shuo-xie/; /j/jiahao-qiu/; /a/ankita-pasad/; /l/li-du/; /q/qing-qu/; /h/hongyuan-mei/", + "bibtex": "@inproceedings{xie-etal-2022-hidden,\n title = \"Hidden State Variability of Pretrained Language Models Can Guide Computation Reduction for Transfer Learning\",\n author = \"Xie, Shuo and\n Qiu, Jiahao and\n Pasad, Ankita and\n Du, Li and\n Qu, Qing and\n Mei, Hongyuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.422/\",\n doi = \"10.18653/v1/2022.findings-emnlp.422\",\n pages = \"5750--5768\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.422.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.422/", + "pdf_size": 2001845, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2851685715449524088&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.610", + "title": "Hierarchical Multi-Label Classification of Scientific Documents", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Automatic topic classification has been studied extensively to assist managing and indexing scientific documents in a digital collection. 
With the large number of topics being available in recent years, it has become necessary to arrange them in a hierarchy. Therefore, the automatic classification systems need to be able to classify the documents hierarchically. In addition, each paper is often assigned to more than one relevant topic. For example, a paper can be assigned to several topics in a hierarchy tree. In this paper, we introduce a new dataset for hierarchical multi-label text classification (HMLTC) of scientific papers called SciHTC, which contains 186,160 papers and 1,234 categories from the ACM CCS tree. We establish strong baselines for HMLTC and propose a multi-task learning approach for topic classification with keyword labeling as an auxiliary task. Our best model achieves a Macro-F1 score of 34.57% which shows that this dataset provides significant research opportunities on hierarchical scientific topic classification. We make our dataset and code for all experiments publicly available.", + "author": "Mobashir Sadat; Cornelia Caragea", + "authorids": "/m/mobashir-sadat/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{sadat-caragea-2022-hierarchical,\n title = \"Hierarchical Multi-Label Classification of Scientific Documents\",\n author = \"Sadat, Mobashir and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.610/\",\n doi = \"10.18653/v1/2022.emnlp-main.610\",\n pages = \"8923--8937\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.610.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.610/", + "pdf_size": 311107, + "gs_citation": 19, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=15328651165017375365&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Computer Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago", + "aff_domain": "uic.edu;uic.edu", + "email": "uic.edu;uic.edu", + "github": "https://github.com/msadat3/SciHTC", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.563", + "title": "Hierarchical Phrase-Based Sequence-to-Sequence Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper describes a neural transducer that maintains the flexibility of standard sequence-to-sequence (seq2seq) models while incorporating hierarchical phrases as a source of inductive bias during training and as explicit constraints during inference. Our approach trains two models: a discriminative parser based on a bracketing transduction grammar whose derivation tree hierarchically aligns source and target phrases, and a neural seq2seq model that learns to translate the aligned phrases one-by-one. We use the same seq2seq model to translate at all phrase scales, which results in two inference modes: one mode in which the parser is discarded and only the seq2seq component is used at the sequence-level, and another in which the parser is combined with the seq2seq model. Decoding in the latter mode is done with the cube-pruned CKY algorithm, which is more involved but can make use of new translation rules during inference. We formalize our model as a source-conditioned synchronous grammar and develop an efficient variational inference algorithm for training. 
When applied on top of both randomly initialized and pretrained seq2seq models, we find that it performs well compared to baselines on small scale machine translation benchmarks.", + "author": "Bailin Wang; Ivan Titov; Jacob Andreas; Yoon Kim", + "authorids": "/b/bailin-wang/; /i/ivan-titov/; /j/jacob-andreas/; /y/yoon-kim/", + "bibtex": "@inproceedings{wang-etal-2022-hierarchical-phrase,\n title = \"Hierarchical Phrase-Based Sequence-to-Sequence Learning\",\n author = \"Wang, Bailin and\n Titov, Ivan and\n Andreas, Jacob and\n Kim, Yoon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.563/\",\n doi = \"10.18653/v1/2022.emnlp-main.563\",\n pages = \"8211--8229\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.563.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.563/", + "pdf_size": 808990, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3381595270336774218&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "MIT; University of Edinburgh + University of Amsterdam; MIT; MIT", + "aff_domain": "mit.edu;inf.ed.ac.uk;mit.edu;mit.edu", + "email": "mit.edu;inf.ed.ac.uk;mit.edu;mit.edu", + "github": "https://github.com/berlino/btg-seq2seq", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;University of Edinburgh;University of Amsterdam", + "aff_unique_dep": ";;", + "aff_unique_url": "https://web.mit.edu;https://www.ed.ac.uk;https://www.uva.nl", + "aff_unique_abbr": "MIT;Edinburgh;UvA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+2;0;0", + 
"aff_country_unique": "United States;United Kingdom;Netherlands" + }, + { + "id": "2022.findings-emnlp.247", + "title": "History-Aware Hierarchical Transformer for Multi-session Open-domain Dialogue System", + "track": "main", + "status": "finding", + "award": false, + "abstract": "With the evolution of pre-trained language models, current open-domain dialogue systems have achieved great progress in conducting one-session conversations. In contrast, Multi-Session Conversation (MSC), which consists of multiple sessions over a long term with the same user, is under-investigated. In this paper, we propose History-Aware Hierarchical Transformer (HAHT) for multi-session open-domain dialogue. HAHT maintains a long-term memory of history conversations and utilizes history information to understand current conversation context and generate well-informed and context-relevant responses. Specifically, HAHT first encodes history conversation sessions hierarchically into a history memory. Then, HAHT leverages historical information to facilitate the understanding of the current conversation context by encoding the history memory together with the current context with attention-based mechanisms. Finally, to explicitly utilize historical information, HAHT uses a history-aware response generator that switches between a generic vocabulary and a history-aware vocabulary. Experimental results on a large-scale MSC dataset suggest that the proposed HAHT model consistently outperforms baseline models. 
Human evaluation results support that HAHT generates more human-like, context-relevant, and history-relevant responses than baseline models.", + "author": "Tong Zhang; Yong Liu; Boyang Li; Zhiwei Zeng; Pengwei Wang; Yuan You; Chunyan Miao; Lizhen Cui", + "authorids": "/t/tong-zhang/; /y/yong-liu/; /b/boyang-li/; /z/zhiwei-zeng/; /p/pengwei-wang/; /y/yuan-you/; /c/chunyan-miao/; /l/lizhen-cui/", + "bibtex": "@inproceedings{zhang-etal-2022-history,\n title = \"History-Aware Hierarchical Transformer for Multi-session Open-domain Dialogue System\",\n author = \"Zhang, Tong and\n Liu, Yong and\n Li, Boyang and\n Zeng, Zhiwei and\n Wang, Pengwei and\n You, Yuan and\n Miao, Chunyan and\n Cui, Lizhen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.247/\",\n doi = \"10.18653/v1/2022.findings-emnlp.247\",\n pages = \"3395--3407\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.247.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.247/", + "pdf_size": 442309, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3851499189002884373&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.497", + "title": "Holistic Sentence Embeddings for Better Out-of-Distribution Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Detecting out-of-distribution (OOD) instances is significant for the safe deployment of NLP models. 
Among recent textual OOD detection works based on pretrained language models (PLMs), distance-based methods have shown superior performance. However, they estimate sample distance scores in the last-layer CLS embedding space and thus do not make full use of linguistic information underlying in PLMs. To address the issue, we propose to boost OOD detection by deriving more holistic sentence embeddings. On the basis of the observations that token averaging and layer combination contribute to improving OOD detection, we propose a simple embedding approach named Avg-Avg, which averages all token representations from each intermediate layer as the sentence embedding and significantly surpasses the state-of-the-art on a comprehensive suite of benchmarks by a 9.33% FAR95 margin. Furthermore, our analysis demonstrates that it indeed helps preserve general linguistic knowledge in fine-tuned PLMs and substantially benefits detecting background shifts. The simple yet effective embedding method can be applied to fine-tuned PLMs with negligible extra costs, providing a free gain in OOD detection. 
Our code is available at https://github.com/lancopku/Avg-Avg.", + "author": "Sishuo Chen; Xiaohan Bi; Rundong Gao; Xu Sun", + "authorids": "/s/sishuo-chen/; /x/xiaohan-bi/; /r/rundong-gao/; /x/xu-sun/", + "bibtex": "@inproceedings{chen-etal-2022-holistic,\n title = \"Holistic Sentence Embeddings for Better Out-of-Distribution Detection\",\n author = \"Chen, Sishuo and\n Bi, Xiaohan and\n Gao, Rundong and\n Sun, Xu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.497/\",\n doi = \"10.18653/v1/2022.findings-emnlp.497\",\n pages = \"6676--6686\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.497.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.497/", + "pdf_size": 729843, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4661214264866736038&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Center for Data Science, Peking University; Center for Data Science, Peking University; Center for Data Science, Peking University; MOE Key Laboratory of Computational Linguistics, School of Computer Science, Peking University", + "aff_domain": "pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;stu.pku.edu.cn;stu.pku.edu.cn;pku.edu.cn", + "github": "https://github.com/lancopku/Avg-Avg", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Center for Data Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + 
"id": "2022.emnlp-main.172", + "title": "How Far are We from Robust Long Abstractive Summarization?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstractive summarization has made tremendous progress in recent years. In this work, we perform fine-grained human annotations to evaluate long document abstractive summarization systems (i.e., models and metrics) with the aim of implementing them to generate reliable summaries. For long document abstractive models, we show that the constant strive for state-of-the-art ROUGE results can lead us to generate more relevant summaries but not factual ones. For long document evaluation metrics, human evaluation results show that ROUGE remains the best at evaluating the relevancy of a summary. It also reveals important limitations of factuality metrics in detecting different types of factual errors and the reasons behind the effectiveness of BARTScore. We then suggest promising directions in the endeavor of developing factual consistency metrics. 
Finally, we release our annotated long document dataset with the hope that it can contribute to the development of metrics across a broader range of summarization settings.", + "author": "Huan Yee Koh; Jiaxin Ju; He Zhang; Ming Liu; Shirui Pan", + "authorids": "/h/huan-yee-koh/; /j/jiaxin-ju/; /h/he-zhang/; /m/ming-liu/; /s/shirui-pan/", + "bibtex": "@inproceedings{koh-etal-2022-far,\n title = \"How Far are We from Robust Long Abstractive Summarization?\",\n author = \"Koh, Huan Yee and\n Ju, Jiaxin and\n Zhang, He and\n Liu, Ming and\n Pan, Shirui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.172/\",\n doi = \"10.18653/v1/2022.emnlp-main.172\",\n pages = \"2682--2698\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.172.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.172/", + "pdf_size": 424307, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7186421444895442526&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Faculty of Information Technology, Monash University, Australia; Independent Researcher; Zhongtukexin Co. Ltd., Beijing, China; School of Information Technology, Deakin University, Australia; School of Information and Communication Technology, Griffith University, Australia", + "aff_domain": "monash.edu;gmail.com;kxsz.net;deakin.edu.au;griffith.edu.au", + "email": "monash.edu;gmail.com;kxsz.net;deakin.edu.au;griffith.edu.au", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Monash University;Independent Researcher;Zhongtukexin Co. 
Ltd.;Deakin University;Griffith University", + "aff_unique_dep": "Faculty of Information Technology;;;School of Information Technology;School of Information and Communication Technology", + "aff_unique_url": "https://www.monash.edu;;;https://www.deakin.edu.au;https://www.griffith.edu.au", + "aff_unique_abbr": "Monash;;;Deakin;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;2;0;0", + "aff_country_unique": "Australia;;China" + }, + { + "id": "2022.emnlp-main.62", + "title": "How Large Language Models are Transforming Machine-Paraphrase Plagiarism", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The recent success of large language models for text generation poses a severe threat to academic integrity, as plagiarists can generate realistic paraphrases indistinguishable from original work.However, the role of large autoregressive models in generating machine-paraphrased plagiarism and their detection is still incipient in the literature.This work explores T5 and GPT3 for machine-paraphrase generation on scientific articles from arXiv, student theses, and Wikipedia.We evaluate the detection performance of six automated solutions and one commercial plagiarism detection software and perform a human study with 105 participants regarding their detection performance and the quality of generated examples.Our results suggest that large language models can rewrite text humans have difficulty identifying as machine-paraphrased (53% mean acc.).Human experts rate the quality of paraphrases generated by GPT-3 as high as original texts (clarity 4.0/5, fluency 4.2/5, coherence 3.8/5).The best-performing detection model (GPT-3) achieves 66% F1-score in detecting paraphrases.We make our code, data, and findings publicly available to facilitate the development of detection solutions.", + "author": "Jan Philip Wahle; Terry Ruas; Frederic Kirstein; Bela Gipp", + "authorids": "/j/jan-philip-wahle/; /t/terry-ruas/; 
/f/frederic-kirstein/; /b/bela-gipp/", + "bibtex": "@inproceedings{wahle-etal-2022-large,\n title = \"How Large Language Models are Transforming Machine-Paraphrase Plagiarism\",\n author = \"Wahle, Jan Philip and\n Ruas, Terry and\n Kirstein, Frederic and\n Gipp, Bela\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.62/\",\n doi = \"10.18653/v1/2022.emnlp-main.62\",\n pages = \"952--963\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.62.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.62/", + "pdf_size": 3601222, + "gs_citation": 63, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4936244490385614522&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Georg-August-Universit\u00e4t G\u00f6ttingen, Germany; Georg-August-Universit\u00e4t G\u00f6ttingen, Germany; Mercedes-Benz Group AG, Germany; Georg-August-Universit\u00e4t G\u00f6ttingen, Germany", + "aff_domain": "gipplab.org; ; ; ", + "email": "gipplab.org; ; ; ", + "github": "https://github.com/jpwahle/emnlp22-transforming", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Georg-August-Universit\u00e4t G\u00f6ttingen;Mercedes-Benz Group AG", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uni-goettingen.de;https://www.mercedes-benz.com", + "aff_unique_abbr": "GAU;MBG", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.101", + "title": "How Much Does Attention Actually Attend? 
Questioning the Importance of Attention in Pretrained Transformers", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The attention mechanism is considered the backbone of the widely-used Transformer architecture. It contextualizes the input by computing input-specific attention matrices. We find that this mechanism, while powerful and elegant, is not as important as typically thought for pretrained language models. We introduce PAPA, a new probing method that replaces the input-dependent attention matrices with constant ones\u2014the average attention weights over multiple inputs. We use PAPA to analyze several established pretrained Transformers on six downstream tasks. We find that without any input-dependent attention, all models achieve competitive performance\u2014an average relative drop of only 8% from the probing baseline. Further, little or no performance drop is observed when replacing half of the input-dependent attention matrices with constant (input-independent) ones. Interestingly, we show that better-performing models lose more from applying our method than weaker models, suggesting that the utilization of the input-dependent attention mechanism might be a factor in their success. Our results motivate research on simpler alternatives to input-dependent attention, as well as on methods for better utilization of this mechanism in the Transformer architecture.", + "author": "Michael Hassid; Hao Peng; Daniel Rotem; Jungo Kasai; Ivan Montero; Noah A. Smith; Roy Schwartz", + "authorids": "/m/michael-hassid/; /h/hao-peng/; /d/daniel-rotem/; /j/jungo-kasai/; /i/ivan-montero/; /n/noah-a-smith/; /r/roy-schwartz/", + "bibtex": "@inproceedings{hassid-etal-2022-much,\n title = \"How Much Does Attention Actually Attend? Questioning the Importance of Attention in Pretrained Transformers\",\n author = \"Hassid, Michael and\n Peng, Hao and\n Rotem, Daniel and\n Kasai, Jungo and\n Montero, Ivan and\n Smith, Noah A. 
and\n Schwartz, Roy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.101/\",\n doi = \"10.18653/v1/2022.findings-emnlp.101\",\n pages = \"1403--1416\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.101.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.101/", + "pdf_size": 594254, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2519257119659989818&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "\u2661School of Computer Science & Engineering, Hebrew University of Jerusalem + \u2662Allen Institute for Artificial Intelligence; \u2662Allen Institute for Artificial Intelligence + \u22c6Apple, Inc. + \u2660Paul G. Allen School of Computer Science & Engineering, University of Washington; \u2661School of Computer Science & Engineering, Hebrew University of Jerusalem; \u2660Paul G. Allen School of Computer Science & Engineering, University of Washington; \u22c6Apple, Inc.; \u2660Paul G. 
Allen School of Computer Science & Engineering, University of Washington + \u2661School of Computer Science & Engineering, Hebrew University of Jerusalem; \u2661School of Computer Science & Engineering, Hebrew University of Jerusalem", + "aff_domain": "mail.huji.ac.il;allenai.org;mail.huji.ac.il;cs.washington.edu;apple.com;cs.washington.edu;mail.huji.ac.il", + "email": "mail.huji.ac.il;allenai.org;mail.huji.ac.il;cs.washington.edu;apple.com;cs.washington.edu;mail.huji.ac.il", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1+2+3;0;3;2;3+0;0", + "aff_unique_norm": "Hebrew University of Jerusalem;Allen Institute for Artificial Intelligence;Apple Inc.;University of Washington", + "aff_unique_dep": "School of Computer Science & Engineering;;;Paul G. Allen School of Computer Science & Engineering", + "aff_unique_url": "http://www.huji.ac.il;https://www.allenai.org;https://www.apple.com;https://www.cs.washington.edu", + "aff_unique_abbr": "HUJI;AI2;Apple;UW", + "aff_campus_unique_index": "0;2;0;2;2+0;0", + "aff_campus_unique": "Jerusalem;;Seattle", + "aff_country_unique_index": "0+1;1+1+1;0;1;1;1+0;0", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.findings-emnlp.143", + "title": "How sensitive are translation systems to extra contexts? Mitigating gender bias in Neural Machine Translation models through relevant contexts.", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural Machine Translation systems built on top of Transformer-based architectures are routinely improving the state-of-the-art in translation quality according to word-overlap metrics. However, a growing number of studies also highlight the inherent gender bias that these models incorporate during training, which reflects poorly in their translations. In this work, we investigate whether these models can be instructed to fix their bias during inference using targeted, guided instructions as contexts. 
By translating relevant contextual sentences during inference along with the input, we observe large improvements in reducing the gender bias in translations, across three popular test suites (WinoMT, BUG, SimpleGen). We further propose a novel metric to assess several large pre-trained models (OPUS-MT, M2M-100) on their sensitivity towards using contexts during translation to correct their biases. Our approach requires no fine-tuning, and thus can be used easily in production systems to de-bias translations from stereotypical gender-occupation bias. We hope our method, along with our metric, can be used to build better, bias-free translation systems.", + "author": "Shanya Sharma; Manan Dey; Koustuv Sinha", + "authorids": "/s/shanya-sharma/; /m/manan-dey/; /k/koustuv-sinha/", + "bibtex": "@inproceedings{sharma-etal-2022-sensitive,\n title = \"How sensitive are translation systems to extra contexts? Mitigating gender bias in Neural Machine Translation models through relevant contexts.\",\n author = \"Sharma, Shanya and\n Dey, Manan and\n Sinha, Koustuv\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.143/\",\n doi = \"10.18653/v1/2022.findings-emnlp.143\",\n pages = \"1968--1984\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.143.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.143/", + "pdf_size": 871212, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6155012675253275877&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Walmart Labs, India; SAP Labs, India; McGill University, Montreal, Canada + Mila - Quebec AI Institute", + "aff_domain": "walmart.com;sap.com;mail.mcgill.ca", 
+ "email": "walmart.com;sap.com;mail.mcgill.ca", + "github": "https://github.com/manandey/bias_machine_translation", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "Walmart Labs;SAP Labs;McGill University;Quebec AI Institute", + "aff_unique_dep": ";;;AI Institute", + "aff_unique_url": "https://labs.walmart.com;https://labs.sap/;https://www.mcgill.ca;https://mila.quebec", + "aff_unique_abbr": ";SAP Labs;McGill;Mila", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0;0;1+1", + "aff_country_unique": "India;Canada" + }, + { + "id": "2022.findings-emnlp.310", + "title": "How to Do Things without Words: Modeling Semantic Drift of Emoji", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Emoji have become a significant part of our informal textual communication. Previous work, addressing the societal and linguistic functions of emoji, overlooked the relation between the semantics and the visual variations of the symbols. In this paper we model and analyze the semantic drift of emoji and discuss the features that may be contributing to the drift, some are unique to emoji and some are more general. 
Specifically, we explore the relations between graphical changes and semantic changes.", + "author": "Eyal Arviv; Oren Tsur", + "authorids": "/e/eyal-arviv/; /o/oren-tsur/", + "bibtex": "@inproceedings{arviv-tsur-2022-things,\n title = \"How to Do Things without Words: Modeling Semantic Drift of Emoji\",\n author = \"Arviv, Eyal and\n Tsur, Oren\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.310/\",\n doi = \"10.18653/v1/2022.findings-emnlp.310\",\n pages = \"4206--4211\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.310.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.310/", + "pdf_size": 778593, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6367509272890761071&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Software and Information Systems Engineering, Ben Gurion University of the Negev; Software and Information Systems Engineering, Ben Gurion University of the Negev", + "aff_domain": "post.bgu.ac.il;bgu.ac.il", + "email": "post.bgu.ac.il;bgu.ac.il", + "github": "", + "project": "https://unicode.org/emoji/charts/full-emoji-list.html", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Ben Gurion University of the Negev", + "aff_unique_dep": "Software and Information Systems Engineering", + "aff_unique_url": "https://www.bgu.ac.il", + "aff_unique_abbr": "BGU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.findings-emnlp.539", + "title": "How to Represent Context Better? 
An Empirical Study on Context Modeling for Multi-turn Response Selection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Building retrieval-based dialogue models that can predict appropriate responses based on the understanding of multi-turn context messages is a challenging problem. Early models usually concatenate all utterances or independently encode each dialogue turn, which may lead to an inadequate understanding of dialogue status. Although a few researchers have noticed the importance of context modeling in multi-turn response prediction, there is no systematic comparison to analyze how to model context effectively and no framework to unify those methods. In this paper, instead of configuring new architectures, we investigate how to improve existing models with a better context modeling method. Specifically, we heuristically summarize three categories of turn-aware context modeling strategies which model the context messages from the perspective of sequential relationship, local relationship, and query-aware manner respectively. A Turn-Aware Context Modeling (TACM) layer is explored to flexibly adapt and unify these context modeling strategies to several advanced response selection models. Evaluation results on three public data sets indicate that employing each individual context modeling strategy or multiple strategies can consistently improve the performance of existing models.", + "author": "Jiazhan Feng; Chongyang Tao; Chang Liu; Rui Yan; Dongyan Zhao", + "authorids": "/j/jiazhan-feng/; /c/chongyang-tao/; /c/chang-liu/; /r/rui-yan/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{feng-etal-2022-represent,\n title = \"How to Represent Context Better? 
An Empirical Study on Context Modeling for Multi-turn Response Selection\",\n author = \"Feng, Jiazhan and\n Tao, Chongyang and\n Liu, Chang and\n Yan, Rui and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.539/\",\n doi = \"10.18653/v1/2022.findings-emnlp.539\",\n pages = \"7285--7298\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.539.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.539/", + "pdf_size": 511894, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2414176849342958424&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 2, + "aff": "Wangxuan Institute of Computer Technology, Peking University + School of Intelligence Science and Technology, Peking University + Center for Data Science, Peking University; Wangxuan Institute of Computer Technology, Peking University; Wangxuan Institute of Computer Technology, Peking University + Center for Data Science, Peking University + Beijing Institute for General Artificial Intelligence + State Key Laboratory of Media Convergence Production Technology and Systems; Gaoling School of Artificial Intelligence, Renmin University of China; Wangxuan Institute of Computer Technology, Peking University + Center for Data Science, Peking University + Beijing Institute for General Artificial Intelligence + State Key Laboratory of Media Convergence Production Technology and Systems", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;ruc.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;ruc.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+0;0;0+0+1+2;3;0+0+1+2", + 
"aff_unique_norm": "Peking University;Beijing Institute for General Artificial Intelligence;State Key Laboratory of Media Convergence Production Technology and Systems;Renmin University of China", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;;;Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;http://www.bigaiai.org/;;http://www.ruc.edu.cn", + "aff_unique_abbr": "PKU;BIGAI;;RUC", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0;0+0+0+0;0;0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.521", + "title": "How to Stop an Avalanche? JoDeM: Joint Decision Making through Compare and Contrast for Dialog State Tracking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Dialog state tracking (DST) is a core component in task-oriented dialog systems. Existing state-of-the-art DST model incorporates insight and intuition from the human experience into design of supplementary labels, which greatly assisted the training process of turn-by-turn DST model. Though the turn-by-turn scheme and supplementary labels enabled satisfactory performance on the task, most of the DST models of this fashion label or process the raw dialogue data on the premise that the last turn dialogue state is always correct, which is usually not the case. In this paper, we address the negative impact resulted from the premise above as the avalanche phenomenon. After that, we propose JoDeM, a state-of-the-art DST model which can tackle the Avalanche phenomenon with two mechanisms. First mechanism is a jointly decision making method to extract key information from the dialogue. Second mechanism is a compare and contrast dialogue update technique to prevent error accumulation. Example study and graph analysis are presented to support our claim about the harmfulness of avalanche phenomenon. 
We also conduct quantitative and qualitative experiments on the high quality MultiWOZ2.3 corpus dataset to demonstrate that the proposed model not only outperforms the existing state-of-the-art methods, but also proves the validity of solving avalanche degradation problem.", + "author": "Haoming Wang; Wang Xin", + "authorids": "/h/haoming-wang/; /w/wang-xin/", + "bibtex": "@inproceedings{wang-xin-2022-stop,\n title = \"How to Stop an Avalanche? {J}o{D}e{M}: Joint Decision Making through Compare and Contrast for Dialog State Tracking\",\n author = \"Wang, Haoming and\n Xin, Wang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.521/\",\n doi = \"10.18653/v1/2022.findings-emnlp.521\",\n pages = \"7030--7041\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.521.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.521/", + "pdf_size": 2024121, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18042827788683771731&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.252", + "title": "How to disagree well: Investigating the dispute tactics used on Wikipedia", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Disagreements are frequently studied from the perspective of either detecting toxicity or analysing argument structure. We propose a framework of dispute tactics which unifies these two perspectives, as well as other dialogue acts which play a role in resolving disputes, such as asking questions and providing clarification. 
This framework includes a preferential ordering among rebuttal-type tactics, ranging from ad hominem attacks to refuting the central argument. Using this framework, we annotate 213 disagreements (3,865 utterances) from Wikipedia Talk pages. This allows us to investigate research questions around the tactics used in disagreements; for instance, we provide empirical validation of the approach to disagreement recommended by Wikipedia. We develop models for multilabel prediction of dispute tactics in an utterance, achieving the best performance with a transformer-based label powerset model. Adding an auxiliary task to incorporate the ordering of rebuttal tactics further yields a statistically significant increase. Finally, we show that these annotations can be used to provide useful additional signals to improve performance on the task of predicting escalation.", + "author": "Christine De Kock; Tom Stafford; Andreas Vlachos", + "authorids": "/c/christine-de-kock/; /t/tom-stafford/; /a/andreas-vlachos/", + "bibtex": "@inproceedings{de-kock-vlachos-2022-disagree,\n title = \"How to disagree well: Investigating the dispute tactics used on {W}ikipedia\",\n author = \"De Kock, Christine and\n Stafford, Tom and\n Vlachos, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.252/\",\n doi = \"10.18653/v1/2022.emnlp-main.252\",\n pages = \"3824--3837\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.252.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.252/", + "pdf_size": 424053, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=801741735908613454&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 
5, + "aff": "Department of Computer Science and Technology, University of Cambridge; Department of Psychology, University of Sheffield; Department of Computer Science and Technology, University of Cambridge", + "aff_domain": "cam.ac.uk;sheffield.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;sheffield.ac.uk;cam.ac.uk", + "github": "github.com/christinedekock11/wikitactics3824", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Cambridge;University of Sheffield", + "aff_unique_dep": "Department of Computer Science and Technology;Department of Psychology", + "aff_unique_url": "https://www.cam.ac.uk;https://www.sheffield.ac.uk", + "aff_unique_abbr": "Cambridge;Sheffield", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.88", + "title": "How well can Text-to-Image Generative Models understand Ethical Natural Language Interventions?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text-to-image generative models have achieved unprecedented success in generating high-quality images based on natural language descriptions. However, it is shown that these models tend to favor specific social groups when prompted with neutral text descriptions (e.g., \u2018a photo of a lawyer\u2019). Following Zhao et al. (2021), we study the effect on the diversity of the generated images when adding ethical intervention that supports equitable judgment (e.g., \u2018if all individuals can be a lawyer irrespective of their gender\u2019) in the input prompts. To this end, we introduce an Ethical NaTural Language Interventions in Text-to-Image GENeration (ENTIGEN) benchmark dataset to evaluate the change in image generations conditional on ethical interventions across three social axes \u2013 gender, skin color, and culture. 
Through CLIP-based and human evaluation on minDALL.E, DALL.E-mini and Stable Diffusion, we find that the model generations cover diverse social groups while preserving the image quality. In some cases, the generations would be anti-stereotypical (e.g., models tend to create images with individuals that are perceived as man when fed with prompts about makeup) in the presence of ethical intervention. Preliminary studies indicate that a large change in the model predictions is triggered by certain phrases such as \u2018irrespective of gender\u2019 in the context of gender bias in the ethical interventions. We release code and annotated data at https://github.com/Hritikbansal/entigen_emnlp.", + "author": "Hritik Bansal; Da Yin; Masoud Monajatipoor; Kai-Wei Chang", + "authorids": "/h/hritik-bansal/; /d/da-yin/; /m/masoud-monajatipoor/; /k/kai-wei-chang/", + "bibtex": "@inproceedings{bansal-etal-2022-well,\n title = \"How well can Text-to-Image Generative Models understand Ethical Natural Language Interventions?\",\n author = \"Bansal, Hritik and\n Yin, Da and\n Monajatipoor, Masoud and\n Chang, Kai-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.88/\",\n doi = \"10.18653/v1/2022.emnlp-main.88\",\n pages = \"1358--1370\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.88.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.88/", + "pdf_size": 1013845, + "gs_citation": 98, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9977158468852414464&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Computer Science Department, University of California, Los Angeles; Computer Science Department, University of 
California, Los Angeles; Computer Science Department, University of California, Los Angeles; Computer Science Department, University of California, Los Angeles", + "aff_domain": "cs.ucla.edu;cs.ucla.edu;ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu;cs.ucla.edu;ucla.edu;cs.ucla.edu", + "github": "https://github.com/Hritikbansal/entigen_emnlp", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of California, Los Angeles", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.ucla.edu", + "aff_unique_abbr": "UCLA", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.389", + "title": "How \u201cMulti\u201d is Multi-Document Summarization?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of multi-document summarization (MDS) aims at models that, given multiple documents as input, are able to generate a summary that combines disperse information, originally spread __across__ these documents. Accordingly, it is expected that both reference summaries in MDS datasets, as well as system summaries, would indeed be based on such dispersed information. In this paper, we argue for quantifying and assessing this expectation. To that end, we propose an automated measure for evaluating the degree to which a summary is \u201cdisperse\u201d, in the sense of the number of source documents needed to cover its content. We apply our measure to empirically analyze several popular MDS datasets, with respect to their reference summaries, as well as the output of state-of-the-art systems. Our results show that certain MDS datasets barely require combining information from multiple documents, where a single document often covers the full summary content. 
Overall, we advocate using our metric for assessing and improving the degree to which summarization datasets require combining multi-document information, and similarly how summarization models actually meet this challenge.", + "author": "Ruben Wolhandler; Arie Cattan; Ori Ernst; Ido Dagan", + "authorids": "/r/ruben-wolhandler/; /a/arie-cattan/; /o/ori-ernst/; /i/ido-dagan/", + "bibtex": "@inproceedings{wolhandler-etal-2022-multi,\n title = \"How {\\textquotedblleft}Multi{\\textquotedblright} is Multi-Document Summarization?\",\n author = \"Wolhandler, Ruben and\n Cattan, Arie and\n Ernst, Ori and\n Dagan, Ido\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.389/\",\n doi = \"10.18653/v1/2022.emnlp-main.389\",\n pages = \"5761--5769\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.389.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.389/", + "pdf_size": 382712, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12662377252011029854&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Computer Science Department, Bar Ilan University; Computer Science Department, Bar Ilan University; Computer Science Department, Bar Ilan University; Computer Science Department, Bar Ilan University", + "aff_domain": "gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "email": "gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "github": "https://github.com/ariecattan/multi_mds", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Bar Ilan University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.biu.ac.il", + "aff_unique_abbr": 
"BIU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.findings-emnlp.321", + "title": "HumSet: Dataset of Multilingual Information Extraction and Classification for Humanitarian Crises Response", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Timely and effective response to humanitarian crises requires quick and accurate analysis of large amounts of text data \u2013 a process that can highly benefit from expert-assisted NLP systems trained on validated and annotated data in the humanitarian response domain. To enable creation of such NLP systems, we introduce and release HumSet, a novel and rich multilingual dataset of humanitarian response documents annotated by experts in the humanitarian response community. The dataset provides documents in three languages (English, French, Spanish) and covers a variety of humanitarian crises from 2018 to 2021 across the globe. For each document, HUMSET provides selected snippets (entries) as well as assigned classes to each entry annotated using common humanitarian information analysis frameworks. HUMSET also provides novel and challenging entry extraction and multi-label entry classification tasks. In this paper, we take a first step towards approaching these tasks and conduct a set of experiments on Pre-trained Language Models (PLM) to establish strong baselines for future research in this domain. 
The dataset is available at https://blog.thedeep.io/humset/.", + "author": "Selim Fekih; Nicolo\u2019 Tamagnone; Benjamin Minixhofer; Ranjan Shrestha; Ximena Contla; Ewan Oglethorpe; Navid Rekabsaz", + "authorids": "/s/selim-fekih/; /n/nicolo-tamagnone/; /b/benjamin-minixhofer/; /r/ranjan-shrestha/; /x/ximena-contla/; /e/ewan-oglethorpe/; /n/navid-rekabsaz/", + "bibtex": "@inproceedings{fekih-etal-2022-humset,\n title = \"{H}um{S}et: Dataset of Multilingual Information Extraction and Classification for Humanitarian Crises Response\",\n author = \"Fekih, Selim and\n Tamagnone, Nicolo{'} and\n Minixhofer, Benjamin and\n Shrestha, Ranjan and\n Contla, Ximena and\n Oglethorpe, Ewan and\n Rekabsaz, Navid\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.321/\",\n doi = \"10.18653/v1/2022.findings-emnlp.321\",\n pages = \"4379--4389\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.321.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.321/", + "pdf_size": 249552, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8046503043077400398&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Data Friendly Space; Data Friendly Space; Johannes Kepler University Linz, LIT AI Lab, Austria; ToggleCorp Solutions; Data Friendly Space; Data Friendly Space; Johannes Kepler University Linz, LIT AI Lab, Austria", + "aff_domain": "datafriendlyspace.org;datafriendlyspace.org;jku.at;togglecorp.com;datafriendlyspace.org;datafriendlyspace.org;jku.at", + "email": "datafriendlyspace.org;datafriendlyspace.org;jku.at;togglecorp.com;datafriendlyspace.org;datafriendlyspace.org;jku.at", + "github": "", + "project": 
"https://blog.thedeep.io/humset/", + "author_num": 7, + "aff_unique_index": "0;0;1;2;0;0;1", + "aff_unique_norm": "Data Friendly Space;Johannes Kepler University Linz;ToggleCorp Solutions", + "aff_unique_dep": ";LIT AI Lab;", + "aff_unique_url": ";https://www.jku.at;", + "aff_unique_abbr": ";JKU;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Linz", + "aff_country_unique_index": "1;1", + "aff_country_unique": ";Austria" + }, + { + "id": "2022.emnlp-main.694", + "title": "Human Guided Exploitation of Interpretable Attention Patterns in Summarization and Topic Segmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The multi-head self-attention mechanism of the transformer model has been thoroughly investigated recently. In one vein of study, researchers are interested in understanding why and how transformers work. In another vein, researchers propose new attention augmentation methods to make transformers more accurate, efficient and interpretable. In this paper, we combine these two lines of research in a human-in-the-loop pipeline to first discover important task-specific attention patterns. Then those patterns are injected, not only to smaller models, but also to the original model. The benefits of our pipeline and discovered patterns are demonstrated in two case studies with extractive summarization and topic segmentation. 
After discovering interpretable patterns in BERT-based models fine-tuned for the two downstream tasks, experiments indicate that when we inject the patterns into attention heads, the models show considerable improvements in accuracy and efficiency.", + "author": "Raymond Li; Wen Xiao; Linzi Xing; Lanjun Wang; Gabriel Murray; Giuseppe Carenini", + "authorids": "/r/raymond-li/; /w/wen-xiao/; /l/linzi-xing/; /l/lanjun-wang/; /g/gabriel-murray/; /g/giuseppe-carenini/", + "bibtex": "@inproceedings{li-etal-2022-human,\n title = \"Human Guided Exploitation of Interpretable Attention Patterns in Summarization and Topic Segmentation\",\n author = \"Li, Raymond and\n Xiao, Wen and\n Xing, Linzi and\n Wang, Lanjun and\n Murray, Gabriel and\n Carenini, Giuseppe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.694/\",\n doi = \"10.18653/v1/2022.emnlp-main.694\",\n pages = \"10189--10204\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.694.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.694/", + "pdf_size": 1440890, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18037184618784163929&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "University of British Columbia; University of British Columbia; University of British Columbia; Tianjin University + University of British Columbia; University of Fraser Valley; University of British Columbia", + "aff_domain": "cs.ubc.ca;cs.ubc.ca;cs.ubc.ca;tju.edu.cn;ufv.ca;cs.ubc.ca", + "email": "cs.ubc.ca;cs.ubc.ca;cs.ubc.ca;tju.edu.cn;ufv.ca;cs.ubc.ca", + "github": "https://github.com/raymondzmc/Attention-Pattern-Exploitation10189", + "project": "", + 
"author_num": 6, + "aff_unique_index": "0;0;0;1+0;2;0", + "aff_unique_norm": "University of British Columbia;Tianjin University;University of Fraser Valley", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ubc.ca;http://www.tju.edu.cn;https://www.ufv.ca", + "aff_unique_abbr": "UBC;TJU;UFV", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Vancouver;", + "aff_country_unique_index": "0;0;0;1+0;0;0", + "aff_country_unique": "Canada;China" + }, + { + "id": "2022.emnlp-main.549", + "title": "Human-Machine Collaboration Approaches to Build a Dialogue Dataset for Hate Speech Countering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fighting online hate speech is a challenge that is usually addressed using Natural Language Processing via automatic detection and removal of hate content. Besides this approach, counter narratives have emerged as an effective tool employed by NGOs to respond to online hate on social media platforms. For this reason, Natural Language Generation is currently being studied as a way to automatize counter narrative writing. However, the existing resources necessary to train NLG models are limited to 2-turn interactions (a hate speech and a counter narrative as response), while in real life, interactions can consist of multiple turns. In this paper, we present a hybrid approach for dialogical data collection, which combines the intervention of human expert annotators over machine generated dialogues obtained using 19 different configurations. 
The result of this work is DIALOCONAN, the first dataset comprising over 3000 fictitious multi-turn dialogues between a hater and an NGO operator, covering 6 targets of hate.", + "author": "Helena Bonaldi; Sara Dellantonio; Serra Sinem Tekiro\u011flu; Marco Guerini", + "authorids": "/h/helena-bonaldi/; /s/sara-dellantonio/; /s/serra-sinem-tekiroglu/; /m/marco-guerini/", + "bibtex": "@inproceedings{bonaldi-etal-2022-human,\n title = \"Human-Machine Collaboration Approaches to Build a Dialogue Dataset for Hate Speech Countering\",\n author = \"Bonaldi, Helena and\n Dellantonio, Sara and\n Tekiro{\\u{g}}lu, Serra Sinem and\n Guerini, Marco\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.549/\",\n doi = \"10.18653/v1/2022.emnlp-main.549\",\n pages = \"8031--8049\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.549.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.549/", + "pdf_size": 469571, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8105236079330133825&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "University of Trento, Italy + Fondazione Bruno Kessler, Via Sommarive 18, Povo, Trento, Italy; Free University of Bozen-Bolzano, Italy + Fondazione Bruno Kessler, Via Sommarive 18, Povo, Trento, Italy; Fondazione Bruno Kessler, Via Sommarive 18, Povo, Trento, Italy; Fondazione Bruno Kessler, Via Sommarive 18, Povo, Trento, Italy", + "aff_domain": "fbk.eu;fbk.eu;fbk.eu;fbk.eu", + "email": "fbk.eu;fbk.eu;fbk.eu;fbk.eu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+1;1;1", + "aff_unique_norm": "University of Trento;Fondazione Bruno 
Kessler;Free University of Bozen-Bolzano", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.unitn.it;https://www.fbk.eu;https://www.unibz.it", + "aff_unique_abbr": "UniTN;FBK;UNIBZ", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "Italy" + }, + { + "id": "2022.findings-emnlp.548", + "title": "Human-in-the-Loop Hate Speech Classification in a Multilingual Context", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The shift of public debate to the digital sphere has been accompanied by a rise in online hate speech. While many promising approaches for hate speech classification have been proposed, studies often focus only on a single language, usually English, and do not address three key concerns: post-deployment performance, classifier maintenance and infrastructural limitations. In this paper, we introduce a new human-in-the-loop BERT-based hate speech classification pipeline and trace its development from initial data collection and annotation all the way to post-deployment. Our classifier, trained using data from our original corpus of over 422k examples, is specifically developed for the inherently multilingual setting of Switzerland and outperforms with its F1 score of 80.5 the currently best-performing BERT-based multilingual classifier by 5.8 F1 points in German and 3.6 F1 points in French. 
Our systematic evaluations over a 12-month period further highlight the vital importance of continuous, human-in-the-loop classifier maintenance to ensure robust hate speech classification post-deployment.", + "author": "Ana Kotarcic; Dominik Hangartner; Fabrizio Gilardi; Selina Kurer; Karsten Donnay", + "authorids": "/a/ana-kotarcic/; /d/dominik-hangartner/; /f/fabrizio-gilardi/; /s/selina-kurer/; /k/karsten-donnay/", + "bibtex": "@inproceedings{kotarcic-etal-2022-human,\n title = \"Human-in-the-Loop Hate Speech Classification in a Multilingual Context\",\n author = \"Kotarcic, Ana and\n Hangartner, Dominik and\n Gilardi, Fabrizio and\n Kurer, Selina and\n Donnay, Karsten\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.548/\",\n pages = \"7414--7442\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.548.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.548/", + "pdf_size": 308408, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13064510241540144320&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": "Department of Political Science, University of Zurich, Switzerland+Immigration Policy Lab, ETH Zurich, Switzerland; Immigration Policy Lab, ETH Zurich, Switzerland; Department of Political Science, University of Zurich, Switzerland; Immigration Policy Lab, ETH Zurich, Switzerland; Department of Political Science, University of Zurich, Switzerland", + "aff_domain": "gess.ethz.ch;gess.ethz.ch;ipz.uzh.ch;gess.ethz.ch;ipz.uzh.ch", + "email": "gess.ethz.ch;gess.ethz.ch;ipz.uzh.ch;gess.ethz.ch;ipz.uzh.ch", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": 
"0+1;1;0;1;0", + "aff_unique_norm": "University of Zurich;ETH Zurich", + "aff_unique_dep": "Department of Political Science;Immigration Policy Lab", + "aff_unique_url": "https://www.unizh.ch;https://www.ethz.ch", + "aff_unique_abbr": "UZH;ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.emnlp-main.30", + "title": "HydraSum: Disentangling Style Features in Text Summarization with Multi-Decoder Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Summarization systems make numerous \u201cdecisions\u201d about summary properties during inference, e.g. degree of copying, specificity and length of outputs, etc. However, these are implicitly encoded within model parameters and specific styles cannot be enforced. To address this, we introduce HydraSum, a new summarization architecture that extends the single decoder framework of current models to a mixture-of-experts version with multiple decoders. We show that HydraSum\u2019s multiple decoders automatically learn contrasting summary styles when trained under the standard training objective without any extra supervision. Through experiments on three summarization datasets (CNN, Newsroom and XSum), we show that HydraSum provides a simple mechanism to obtain stylistically-diverse summaries by sampling from either individual decoders or their mixtures, outperforming baseline models. Finally, we demonstrate that a small modification to the gating strategy during training can enforce an even stricter style partitioning, e.g. 
high- vs low-abstractiveness or high- vs low-specificity, allowing users to sample from a larger area in the generation space and vary summary styles along multiple dimensions.", + "author": "Tanya Goyal; Nazneen Rajani; Wenhao Liu; Wojciech Kryscinski", + "authorids": "/t/tanya-goyal/; /n/nazneen-rajani/; /w/wenhao-liu/; /w/wojciech-kryscinski/", + "bibtex": "@inproceedings{goyal-etal-2022-hydrasum,\n title = \"{H}ydra{S}um: Disentangling Style Features in Text Summarization with Multi-Decoder Models\",\n author = \"Goyal, Tanya and\n Rajani, Nazneen and\n Liu, Wenhao and\n Kryscinski, Wojciech\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.30/\",\n doi = \"10.18653/v1/2022.emnlp-main.30\",\n pages = \"464--479\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.30.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.30/", + "pdf_size": 2908276, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5726953392212174201&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science, The University of Texas at Austin; Hugging Face; Faire; Salesforce Research", + "aff_domain": "utexas.edu; ; ; ", + "email": "utexas.edu; ; ; ", + "github": "https://github.com/salesforce/hydra-sum", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "The University of Texas at Austin;Hugging Face;Faire;Salesforce", + "aff_unique_dep": "Department of Computer Science;;;Salesforce Research", + "aff_unique_url": "https://www.utexas.edu;https://huggingface.co;https://www.faire.com;https://research.salesforce.com", + "aff_unique_abbr": "UT Austin;Hugging 
Face;Faire;Salesforce", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Austin;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.541", + "title": "Hyper-X: A Unified Hypernetwork for Multi-Task Multilingual Transfer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Massively multilingual models are promising for transfer learning across tasks and languages. However, existing methods are unable to fully leverage training data when it is available in different task-language combinations. To exploit such heterogeneous supervision, we propose Hyper-X, a single hypernetwork that unifies multi-task and multilingual learning with efficient adaptation. It generates weights for adapter modules conditioned on both tasks and language embeddings. By learning to combine task and language-specific knowledge, our model enables zero-shot transfer for unseen languages and task-language combinations. Our experiments on a diverse set of languages demonstrate that Hyper-X achieves the best or competitive gain when a mixture of multiple resources is available, while on par with strong baseline in the standard scenario. Hyper-X is also considerably more efficient in terms of parameters and resources compared to methods that train separate adapters. 
Finally, Hyper-X consistently produces strong results in few-shot scenarios for new languages, showing the versatility of our approach beyond zero-shot transfer.", + "author": "Ahmet \u00dcst\u00fcn; Arianna Bisazza; Gosse Bouma; Gertjan van Noord; Sebastian Ruder", + "authorids": "/a/ahmet-ustun/; /a/arianna-bisazza/; /g/gosse-bouma/; /g/gertjan-van-noord/; /s/sebastian-ruder/", + "bibtex": "@inproceedings{ustun-etal-2022-hyper,\n title = \"Hyper-{X}: A Unified Hypernetwork for Multi-Task Multilingual Transfer\",\n author = {{\\\"U}st{\\\"u}n, Ahmet and\n Bisazza, Arianna and\n Bouma, Gosse and\n van Noord, Gertjan and\n Ruder, Sebastian},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.541/\",\n doi = \"10.18653/v1/2022.emnlp-main.541\",\n pages = \"7934--7949\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.541.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.541/", + "pdf_size": 859966, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9853981569111297870&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Groningen; University of Groningen; University of Groningen; University of Groningen; Google Research", + "aff_domain": "rug.nl; ; ; ; ", + "email": "rug.nl; ; ; ; ", + "github": "https://github.com/ahmetustun/hyperx", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "University of Groningen;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.rug.nl;https://research.google", + "aff_unique_abbr": "RUG;Google Research", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain 
View", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "Netherlands;United States" + }, + { + "id": "2022.findings-emnlp.124", + "title": "Hyperdecoders: Instance-specific decoders for multi-task NLP", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We investigate input-conditioned hypernetworks for multi-tasking in NLP, generating parameter-efficient adaptations for a decoder using a hypernetwork conditioned on the output of an encoder. This approach produces a unique decoder adaptation for every input instance, allowing the network a larger degree of flexibility than prior work that only produces one decoder adaptation per task. We apply our method to sequence classification tasks, extractive QA, and summarisation and find that it surpasses previous parameter efficient fine-tuning methods and often outperforms fully finetuning the underlying model. An analysis of the embeddings used by our hypernetwork shows that they are sensitive to output label and type, suggesting that our approach better maps from encoder representations to output labels. 
Our code is publicly available at https://github.com/allenai/hyperdecoders.", + "author": "Hamish Ivison; Matthew Peters", + "authorids": "/h/hamish-ivison/; /m/matthew-e-peters/", + "bibtex": "@inproceedings{ivison-peters-2022-hyperdecoders,\n title = \"Hyperdecoders: Instance-specific decoders for multi-task {NLP}\",\n author = \"Ivison, Hamish and\n Peters, Matthew\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.124/\",\n doi = \"10.18653/v1/2022.findings-emnlp.124\",\n pages = \"1715--1730\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.124.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.124/", + "pdf_size": 1517674, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=788524860681360719&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Allen Institute for AI; Allen Institute for AI", + "aff_domain": "allenai.org;allenai.org", + "email": "allenai.org;allenai.org", + "github": "https://github.com/allenai/hyperdecoders", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Allen Institute for AI", + "aff_unique_dep": "", + "aff_unique_url": "https://allenai.org", + "aff_unique_abbr": "AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.475", + "title": "Hypoformer: Hybrid Decomposition Transformer for Edge-friendly Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer has been demonstrated effective in Neural Machine Translation (NMT). 
However, it is memory-consuming and time-consuming in edge devices, resulting in some difficulties for real-time feedback. To compress and accelerate Transformer, we propose a Hybrid Tensor-Train (HTT) decomposition, which retains full rank and meanwhile reduces operations and parameters. A Transformer using HTT, named Hypoformer, consistently and notably outperforms the recent light-weight SOTA methods on three standard translation tasks under different parameter and speed scales. In extreme low resource scenarios, Hypoformer has 7.1 points absolute improvement in BLEU and 1.27 X speedup than vanilla Transformer on IWSLT\u201914 De-En task.", + "author": "Sunzhu Li; Peng Zhang; Guobing Gan; Xiuqing Lv; Benyou Wang; Junqiu Wei; Xin Jiang", + "authorids": "/s/sunzhu-li/; /p/peng-zhang/; /g/guobing-gan/; /x/xiuqing-lv/; /b/benyou-wang/; /j/junqiu-wei/; /x/xin-jiang/", + "bibtex": "@inproceedings{li-etal-2022-hypoformer,\n title = \"Hypoformer: Hybrid Decomposition Transformer for Edge-friendly Neural Machine Translation\",\n author = \"Li, Sunzhu and\n Zhang, Peng and\n Gan, Guobing and\n Lv, Xiuqing and\n Wang, Benyou and\n Wei, Junqiu and\n Jiang, Xin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.475/\",\n doi = \"10.18653/v1/2022.emnlp-main.475\",\n pages = \"7056--7068\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.475.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.475/", + "pdf_size": 814684, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12399836267675793179&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "College of Intelligence and Computing, Tianjin 
University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; School of Data Science, The Chinese University of Hong Kong, Shenzhen, China; The Hong Kong Polytechnic University, China; Huawei Noah\u2019s Ark Lab, China", + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;cuhk.edu.cn;polyu.edu.hk;huawei.com", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;cuhk.edu.cn;polyu.edu.hk;huawei.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;2;3", + "aff_unique_norm": "Tianjin University;The Chinese University of Hong Kong;The Hong Kong Polytechnic University;Huawei Noah\u2019s Ark Lab", + "aff_unique_dep": "College of Intelligence and Computing;School of Data Science;;", + "aff_unique_url": "http://www.tju.edu.cn;https://www.cuhk.edu.cn;https://www.polyu.edu.hk;https://www.huawei.com/en/ai/noahs-ark-lab", + "aff_unique_abbr": "Tianjin University;CUHK;PolyU;HNAL", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Tianjin;Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.465", + "title": "IDK-MRC: Unanswerable Questions for Indonesian Machine Reading Comprehension", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Machine Reading Comprehension (MRC) has become one of the essential tasks in Natural Language Understanding (NLU) as it is often included in several NLU benchmarks (Liang et al., 2020; Wilie et al., 2020). However, most MRC datasets only have answerable question type, overlooking the importance of unanswerable questions. 
MRC models trained only on answerable questions will select the span that is most likely to be the answer, even when the answer does not actually exist in the given passage (Rajpurkar et al., 2018). This problem especially remains in medium- to low-resource languages like Indonesian. Existing Indonesian MRC datasets (Purwarianti et al., 2007; Clark et al., 2020) are still inadequate because of the small size and limited question types, i.e., they only cover answerable questions. To fill this gap, we build a new Indonesian MRC dataset called I(n)don\u2019tKnow- MRC (IDK-MRC) by combining the automatic and manual unanswerable question generation to minimize the cost of manual dataset construction while maintaining the dataset quality. Combined with the existing answerable questions, IDK-MRC consists of more than 10K questions in total. Our analysis shows that our dataset significantly improves the performance of Indonesian MRC models, showing a large improvement for unanswerable questions.", + "author": "Rifki Afina Putri; Alice Oh", + "authorids": "/r/rifki-afina-putri/; /a/alice-oh/", + "bibtex": "@inproceedings{putri-oh-2022-idk,\n title = \"{IDK}-{MRC}: Unanswerable Questions for {I}ndonesian Machine Reading Comprehension\",\n author = \"Putri, Rifki Afina and\n Oh, Alice\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.465/\",\n doi = \"10.18653/v1/2022.emnlp-main.465\",\n pages = \"6918--6933\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.465.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.465/", + "pdf_size": 1518861, + "gs_citation": 12, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=5510069766929601067&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computing, KAIST, South Korea; School of Computing, KAIST, South Korea", + "aff_domain": "kaist.ac.kr;kaist.edu", + "email": "kaist.ac.kr;kaist.edu", + "github": "https://github.com/rifkiaputri/IDK-MRC", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.576", + "title": "IELM: An Open Information Extraction Benchmark for Pre-Trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce a new open information extraction (OIE) benchmark for pre-trained language models (LM). Recent studies have demonstrated that pre-trained LMs, such as BERT and GPT, may store linguistic and relational knowledge. In particular, LMs are able to answer \u201cfill-in-the-blank\u201d questions when given a pre-defined relation category. Instead of focusing on pre-defined relations, we create an OIE benchmark aiming to fully examine the open relational information present in the pre-trained LMs. We accomplish this by turning pre-trained LMs into zero-shot OIE systems. Surprisingly, pre-trained LMs are able to obtain competitive performance on both standard OIE datasets (CaRB and Re-OIE2016) and two new large-scale factual OIE datasets (TAC KBP-OIE and Wikidata-OIE) that we establish via distant supervision. 
For instance, the zero-shot pre-trained LMs outperform the F1 score of the state-of-the-art supervised OIE methods on our factual OIE datasets without needing to use any training sets.", + "author": "Chenguang Wang; Xiao Liu; Dawn Song", + "authorids": "/c/chenguang-wang/; /x/xiao-liu/; /d/dawn-song/", + "bibtex": "@inproceedings{wang-etal-2022-ielm,\n title = \"{IELM}: An Open Information Extraction Benchmark for Pre-Trained Language Models\",\n author = \"Wang, Chenguang and\n Liu, Xiao and\n Song, Dawn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.576/\",\n doi = \"10.18653/v1/2022.emnlp-main.576\",\n pages = \"8417--8437\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.576.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.576/", + "pdf_size": 630983, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=125030080560588495&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 7, + "aff": "Washington University in St. Louis; Tsinghua University; UC Berkeley", + "aff_domain": "wustl.edu;mails.tsinghua.edu.cn;berkeley.edu", + "email": "wustl.edu;mails.tsinghua.edu.cn;berkeley.edu", + "github": "https://github.com/cgraywang/IELM", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Washington University in St. Louis;Tsinghua University;University of California, Berkeley", + "aff_unique_dep": ";;", + "aff_unique_url": "https://wustl.edu;https://www.tsinghua.edu.cn;https://www.berkeley.edu", + "aff_unique_abbr": "WashU;THU;UC Berkeley", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "St. 
Louis;;Berkeley", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.762", + "title": "IM2: an Interpretable and Multi-category Integrated Metric Framework for Automatic Dialogue Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Evaluation metrics shine the light on the best models and thus strongly influence the research directions, such as the recently developed dialogue metrics USR, FED, and GRADE. However, most current metrics evaluate the dialogue data as isolated and static because they only focus on a single quality or several qualities. To mitigate the problem, this paper proposes an interpretable, multi-faceted, and controllable framework IM^2 (Interpretable and Multi-category Integrated Metric) to combine a large number of metrics which are good at measuring different qualities. The IM^2 framework first divides current popular dialogue qualities into different categories and then applies or proposes dialogue metrics to measure the qualities within each category and finally generates an overall IM^2 score. An initial version of IM^2 was submitted to the AAAI 2022 Track5.1@DSTC10 challenge and took the 2^nd place on both of the development and test leaderboard. After the competition, we develop more metrics and improve the performance of our model. 
We compare IM^2 with other 13 current dialogue metrics and experimental results show that IM^2 correlates more strongly with human judgments than any of them on each evaluated dataset.", + "author": "Zhihua Jiang; Guanghui Ye; Dongning Rao; Di Wang; Xin Miao", + "authorids": "/z/zhihua-jiang/; /g/guanghui-ye/; /d/dongning-rao/; /d/di-wang/; /x/xin-miao/", + "bibtex": "@inproceedings{jiang-etal-2022-im2,\n title = \"{IM}2: an Interpretable and Multi-category Integrated Metric Framework for Automatic Dialogue Evaluation\",\n author = \"Jiang, Zhihua and\n Ye, Guanghui and\n Rao, Dongning and\n Wang, Di and\n Miao, Xin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.762/\",\n doi = \"10.18653/v1/2022.emnlp-main.762\",\n pages = \"11091--11103\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.762.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.762/", + "pdf_size": 422353, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2821652828088068106&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Department of Computer Science, Jinan University, Guangzhou 510632, P. R. China; Department of Computer Science, Jinan University, Guangzhou 510632, P. R. China; School of Computer, Guangdong University of Technology, Guangzhou 510006, P. R. China; Department of Computer Science, Jinan University, Guangzhou 510632, P. R. China; School of Computer Science, Wuhan University, Wuhan 430072, P.R. 
China", + "aff_domain": "jnu.edu.cn;stu2020.jnu.edu.cn;gdut.edu.cn;stu2020.jnu.edu.cn;whu.edu.cn", + "email": "jnu.edu.cn;stu2020.jnu.edu.cn;gdut.edu.cn;stu2020.jnu.edu.cn;whu.edu.cn", + "github": "https://github.com/Jnunlplab/IM2", + "project": "https://chateval.org/dstc10", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Jinan University;Guangdong University of Technology;Wuhan University", + "aff_unique_dep": "Department of Computer Science;School of Computer;School of Computer Science", + "aff_unique_url": "http://www.jnu.edu.cn;;http://www.whu.edu.cn", + "aff_unique_abbr": "JNU;;WHU", + "aff_campus_unique_index": "0;0;0;0;1", + "aff_campus_unique": "Guangzhou;Wuhan", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.584", + "title": "IRRGN: An Implicit Relational Reasoning Graph Network for Multi-turn Response Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of response selection in multi-turn dialogue is to find the best option from all candidates. In order to improve the reasoning ability of the model, previous studies pay more attention to using explicit algorithms to model the dependencies between utterances, which are deterministic, limited and inflexible. In addition, few studies consider differences between the options before and after reasoning. In this paper, we propose an Implicit Relational Reasoning Graph Network to address these issues, which consists of the Utterance Relational Reasoner (URR) and the Option Dual Comparator (ODC). URR aims to implicitly extract dependencies between utterances, as well as utterances and options, and make reasoning with relational graph convolutional networks. ODC focuses on perceiving the difference between the options through dual comparison, which can eliminate the interference of the noise options. 
Experimental results on two multi-turn dialogue reasoning benchmark datasets MuTual and MuTualplus show that our method significantly improves the baseline of four pre-trained language models and achieves state-of-the-art performance. The model surpasses human performance for the first time on the MuTual dataset.", + "author": "Jingcheng Deng; Hengwei Dai; Xuewei Guo; Yuanchen Ju; Wei Peng", + "authorids": "/j/jingcheng-deng/; /h/hengwei-dai/; /x/xuewei-guo/; /y/yuanchen-ju/; /w/wei-peng/", + "bibtex": "@inproceedings{deng-etal-2022-irrgn,\n title = \"{IRRGN}: An Implicit Relational Reasoning Graph Network for Multi-turn Response Selection\",\n author = \"Deng, Jingcheng and\n Dai, Hengwei and\n Guo, Xuewei and\n Ju, Yuanchen and\n Peng, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.584/\",\n doi = \"10.18653/v1/2022.emnlp-main.584\",\n pages = \"8529--8541\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.584.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.584/", + "pdf_size": 1296359, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6538392152650659683&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "College of Computer and Information Science, Southwest University; College of Computer and Information Science, Southwest University; yz-intelligence Inc; College of Computer and Information Science, Southwest University; Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China + School of Cyber Security, University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "163.com;163.com;gmail.com;163.com;iie.ac.cn", + "email": 
"163.com;163.com;gmail.com;163.com;iie.ac.cn", + "github": "https://github.com/DJC-GO-SOLO/IRRGN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2+3", + "aff_unique_norm": "Southwest University;yz-intelligence Inc;Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "College of Computer and Information Science;;Institute of Information Engineering;School of Cyber Security", + "aff_unique_url": "http://www.swu.edu.cn;;http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": ";;CAS;UCAS", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0+0", + "aff_country_unique": "China;" + }, + { + "id": "2022.findings-emnlp.454", + "title": "Identifying Human Strategies for Generating Word-Level Adversarial Examples", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Adversarial examples in NLP are receiving increasing research attention. One line of investigation is the generation of word-level adversarial examples against fine-tuned Transformer models that preserve naturalness and grammaticality. Previous work found that human- and machine-generated adversarial examples are comparable in their naturalness and grammatical correctness. Most notably, humans were able to generate adversarial examples much more effortlessly than automated attacks. In this paper, we provide a detailed analysis of exactly how humans create these adversarial examples. By exploring the behavioural patterns of human workers during the generation process, we identify statistically significant tendencies based on which words humans prefer to select for adversarial replacement (e.g., word frequencies, word saliencies, sentiment) as well as where and when words are replaced in an input sequence. 
With our findings, we seek to inspire efforts that harness human strategies for more robust NLP models.", + "author": "Maximilian Mozes; Bennett Kleinberg; Lewis Griffin", + "authorids": "/m/maximilian-mozes/; /b/bennett-kleinberg/; /l/lewis-griffin/", + "bibtex": "@inproceedings{mozes-etal-2022-identifying,\n title = \"Identifying Human Strategies for Generating Word-Level Adversarial Examples\",\n author = \"Mozes, Maximilian and\n Kleinberg, Bennett and\n Griffin, Lewis\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.454/\",\n doi = \"10.18653/v1/2022.findings-emnlp.454\",\n pages = \"6118--6126\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.454.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.454/", + "pdf_size": 241765, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8459235159251020222&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University College London; University College London + Tilburg University; University College London", + "aff_domain": "cs.ucl.ac.uk;tilburguniversity.edu;cs.ucl.ac.uk", + "email": "cs.ucl.ac.uk;tilburguniversity.edu;cs.ucl.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0", + "aff_unique_norm": "University College London;Tilburg University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucl.ac.uk;https://www.tilburguniversity.edu/", + "aff_unique_abbr": "UCL;Tilburg U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0", + "aff_country_unique": "United Kingdom;Netherlands" + }, + { + "id": "2022.emnlp-main.781", + "title": "Identifying 
Physical Object Use in Sentences", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Commonsense knowledge about the typicalfunctions of physical objects allows people tomake inferences during sentence understanding.For example, we infer that \u201cSam enjoyedthe book\u201d means that Sam enjoyed reading thebook, even though the action is implicit. Priorresearch has focused on learning the prototypicalfunctions of physical objects in order toenable inferences about implicit actions. Butmany sentences refer to objects even when theyare not used (e.g., \u201cThe book fell\u201d). We arguethat NLP systems need to recognize whether anobject is being used before inferring how theobject is used. We define a new task called ObjectUse Classification that determines whethera physical object mentioned in a sentence wasused or likely will be used. We introduce a newdataset for this task and present a classificationmodel that exploits data augmentation methodsand FrameNet when fine-tuning a pre-trainedlanguage model. 
We also show that object useclassification combined with knowledge aboutthe prototypical functions of objects has thepotential to yield very good inferences aboutimplicit and anticipated actions.", + "author": "Tianyu Jiang; Ellen Riloff", + "authorids": "/t/tianyu-jiang/; /e/ellen-riloff/", + "bibtex": "@inproceedings{jiang-riloff-2022-identifying,\n title = \"Identifying Physical Object Use in Sentences\",\n author = \"Jiang, Tianyu and\n Riloff, Ellen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.781/\",\n doi = \"10.18653/v1/2022.emnlp-main.781\",\n pages = \"11362--11372\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.781.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.781/", + "pdf_size": 307956, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13617207977972821033&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "School of Computing, University of Utah; School of Computing, University of Utah", + "aff_domain": "cs.utah.edu;cs.utah.edu", + "email": "cs.utah.edu;cs.utah.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Utah", + "aff_unique_dep": "School of Computing", + "aff_unique_url": "https://www.utah.edu", + "aff_unique_abbr": "U of U", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Utah", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.59", + "title": "Impact of Pretraining Term Frequencies on Few-Shot Numerical Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pretrained 
Language Models (LMs) have demonstrated ability to perform numerical reasoning by extrapolating from a few examples in few-shot settings. However, the extent to which this extrapolation relies on robust reasoning is unclear. In this paper, we investigate how well these models reason with terms that are less frequent in the pretraining data. In particular, we examine the correlations between the model performance on test instances and the frequency of terms from those instances in the pretraining data. We measure the strength of this correlation for a number of GPT-based language models (pretrained on the Pile dataset) on various numerical deduction tasks (e.g., arithmetic and unit conversion). Our results consistently demonstrate that models are more accurate on instances whose terms are more prevalent, in some cases above 70% (absolute) more accurate on the top 10% frequent terms in comparison to the bottom 10%. Overall, although LMs appear successful at few-shot numerical reasoning, our results raise the question of how much models actually generalize beyond pretraining data, and we encourage researchers to take the pretraining data into account when interpreting evaluation results.", + "author": "Yasaman Razeghi; Robert L Logan IV; Matt Gardner; Sameer Singh", + "authorids": "/y/yasaman-razeghi/; /r/robert-l-logan-iv/; /m/matt-gardner/; /s/sameer-singh/", + "bibtex": "@inproceedings{razeghi-etal-2022-impact,\n title = \"Impact of Pretraining Term Frequencies on Few-Shot Numerical Reasoning\",\n author = \"Razeghi, Yasaman and\n Logan IV, Robert L and\n Gardner, Matt and\n Singh, Sameer\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.59/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.59\",\n pages = \"840--854\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.59.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.59/", + "pdf_size": 664278, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff": "University of California, Irvine; Dataminr Inc.; Microsoft Semantic Machines; Allen Institute for AI", + "aff_domain": "uci.edu;dataminr.com;microsoft.com;uci.edu", + "email": "uci.edu;dataminr.com;microsoft.com;uci.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of California, Irvine;Dataminr;Microsoft;Allen Institute for AI", + "aff_unique_dep": ";;Semantic Machines;", + "aff_unique_url": "https://www.uci.edu;https://www.dataminr.com;https://www.microsoft.com;https://allenai.org", + "aff_unique_abbr": "UCI;Dataminr;Microsoft;AI2", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Irvine;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.32", + "title": "Improve Interpretability of Neural Networks via Sparse Contrastive Coding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Although explainable artificial intelligence (XAI) has achieved remarkable developments in recent years, there are few efforts have been devoted to the following problems, namely, i) how to develop an explainable method that could explain the black-box in a model-agnostic way? and ii) how to improve the performance and interpretability of the black-box using such explanations instead of pre-collected important attributions? To explore the potential solution, we propose a model-agnostic explanation method termed as Sparse Contrastive Coding (SCC) and verify its effectiveness in text classification and natural language inference. 
In brief, SCC explains the feature attributions which characterize the importance of words based on the hidden states of each layer of the model. With such word-level explainability, SCC adaptively divides the input sentences into foregrounds and backgrounds in terms of task relevance. Through maximizing the similarity between the foregrounds and input sentences while minimizing the similarity between the backgrounds and input sentences, SSC employs a supervised contrastive learning loss to boost the interpretability and performance of the model. Extensive experiments show the superiority of our method over five state-of-the-art methods in terms of interpretability and classification measurements. The code is available at https://pengxi.me.", + "author": "Junhong Liu; Yijie Lin; Liang Jiang; Jia Liu; Zujie Wen; Xi Peng", + "authorids": "/j/junhong-liu/; /y/yijie-lin/; /l/liang-jiang/; /j/jia-liu/; /z/zujie-wen/; /x/xi-peng/", + "bibtex": "@inproceedings{liu-etal-2022-improve,\n title = \"Improve Interpretability of Neural Networks via Sparse Contrastive Coding\",\n author = \"Liu, Junhong and\n Lin, Yijie and\n Jiang, Liang and\n Liu, Jia and\n Wen, Zujie and\n Peng, Xi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.32/\",\n doi = \"10.18653/v1/2022.findings-emnlp.32\",\n pages = \"460--470\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.32.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.32/", + "pdf_size": 2580117, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3020719433575615355&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Ant Financial Services Group, China; College 
of Computer Science, Sichuan University, China; Ant Financial Services Group, China; Ant Financial Services Group, China; Ant Financial Services Group, China; College of Computer Science, Sichuan University, China", + "aff_domain": "antgroup.com;gmail.com;antgroup.com;antgroup.com;antgroup.com;gmail.com", + "email": "antgroup.com;gmail.com;antgroup.com;antgroup.com;antgroup.com;gmail.com", + "github": "", + "project": "https://pengxi.me", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;1", + "aff_unique_norm": "Ant Financial Services Group;Sichuan University", + "aff_unique_dep": ";College of Computer Science", + "aff_unique_url": "https://www.antgroup.com;https://www.scu.edu.cn", + "aff_unique_abbr": "Ant Financial;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.464", + "title": "Improved Knowledge Distillation for Pre-trained Language Models via Knowledge Selection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge distillation addresses the problem of transferring knowledge from a teacher model to a student model.In this process, we typically have multiple types of knowledge extracted from the teacher model.The problem is to make full use of them to train the student model.Our preliminary study shows that: (1) not all of the knowledge is necessary for learning a good student model, and (2) knowledge distillation can benefit from certain knowledge at different training steps.In response to these, we propose an actor-critic approach to selecting appropriate knowledge to transfer during the process of knowledge distillation.In addition, we offer a refinement of the training algorithm to ease the computational burden.Experimental results on the GLUE datasets show that our method outperforms several strong knowledge distillation baselines significantly.", + "author": "Chenglong Wang; Yi Lu; Yongyu 
Mu; Yimin Hu; Tong Xiao; Jingbo Zhu", + "authorids": "/c/chenglong-wang/; /y/yi-lu/; /y/yongyu-mu/; /y/yimin-hu/; /t/tong-xiao/; /j/jingbo-zhu/", + "bibtex": "@inproceedings{wang-etal-2022-improved,\n title = \"Improved Knowledge Distillation for Pre-trained Language Models via Knowledge Selection\",\n author = \"Wang, Chenglong and\n Lu, Yi and\n Mu, Yongyu and\n Hu, Yimin and\n Xiao, Tong and\n Zhu, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.464/\",\n doi = \"10.18653/v1/2022.findings-emnlp.464\",\n pages = \"6232--6244\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.464.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.464/", + "pdf_size": 575205, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3716202974559244884&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China+ NiuTrans Research, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China+ NiuTrans Research, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China+ NiuTrans Research, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China+ NiuTrans Research, Shenyang, China", + "aff_domain": "gmail.com;gmail.com; ; ;mail.neu.edu.cn;mail.neu.edu.cn", + "email": 
"gmail.com;gmail.com; ; ;mail.neu.edu.cn;mail.neu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0;0;0+1;0+1", + "aff_unique_norm": "Northeastern University;NiuTrans Research", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.neu.edu.cn/;", + "aff_unique_abbr": "NEU;", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Shenyang;", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.220", + "title": "Improved Universal Sentence Embeddings with Prompt-based Contrastive Learning and Energy-based Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Contrastive learning has been demonstrated to be effective in enhancing pre-trained language models (PLMs) to derive superior universal sentence embeddings. However, existing contrastive methods still have two limitations. Firstly, previous works may acquire poor performance under domain shift settings, thus hindering the application of sentence representations in practice. We attribute this low performance to the over-parameterization of PLMs with millions of parameters. To alleviate it, we propose PromCSE (Prompt-based Contrastive Learning for Sentence Embeddings), which only trains small-scale Soft Prompt (i.e., a set of trainable vectors) while keeping PLMs fixed. Secondly, the commonly used NT-Xent loss function of contrastive learning does not fully exploit hard negatives in supervised learning settings. To this end, we propose to integrate an Energy-based Hinge loss to enhance the pairwise discriminative power, inspired by the connection between the NT-Xent loss and the Energy-based Learning paradigm. 
Empirical results on seven standard semantic textual similarity (STS) tasks and a domain-shifted STS task both show the effectiveness of our method compared with the current state-of-the-art sentence embedding models.", + "author": "Yuxin Jiang; Linhan Zhang; Wei Wang", + "authorids": "/y/yuxin-jiang/; /l/linhan-zhang/; /w/wei-wang/", + "bibtex": "@inproceedings{jiang-etal-2022-improved,\n title = \"Improved Universal Sentence Embeddings with Prompt-based Contrastive Learning and Energy-based Learning\",\n author = \"Jiang, Yuxin and\n Zhang, Linhan and\n Wang, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.220/\",\n doi = \"10.18653/v1/2022.findings-emnlp.220\",\n pages = \"3021--3035\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.220.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.220/", + "pdf_size": 639071, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5793600040051653703&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology; School of Computer Science and Engineering, The University of New South Wales; The Hong Kong University of Science and Technology (Guangzhou)+The Hong Kong University of Science and Technology", + "aff_domain": "connect.ust.hk;student.unsw.edu.au;ust.hk", + "email": "connect.ust.hk;student.unsw.edu.au;ust.hk", + "github": "https://github.com/YJiangcm/PromCSE", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0+1", + "aff_unique_norm": "The Hong Kong University of Science and Technology;Hong Kong University of Science 
and Technology;The University of New South Wales", + "aff_unique_dep": ";;School of Computer Science and Engineering", + "aff_unique_url": "https://www.ust.hk;https://www.ust.hk;https://www.unsw.edu.au", + "aff_unique_abbr": "HKUST;HKUST;UNSW", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Guangzhou;", + "aff_country_unique_index": "0+0;1;0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.785", + "title": "Improved grammatical error correction by ranking elementary edits", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We offer a two-stage reranking method for grammatical error correction: the first model serves as edit generator, while the second classifies the proposed edits as correct or false. We show how to use both encoder-decoder and sequence labeling models for the first step of our pipeline. We achieve state-of-the-art quality on BEA 2019 English dataset even using weak BERT-GEC edit generator. Combining our roberta-base scorer with state-of-the-art GECToR edit generator, we surpass GECToR by 2-3%. With a larger model we establish a new SOTA on BEA development and test sets. 
Our model also sets a new SOTA on Russian, despite using smaller models and less data than the previous approaches.", + "author": "Alexey Sorokin", + "authorids": "/a/alexey-sorokin/", + "bibtex": "@inproceedings{sorokin-2022-improved,\n title = \"Improved grammatical error correction by ranking elementary edits\",\n author = \"Sorokin, Alexey\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.785/\",\n doi = \"10.18653/v1/2022.emnlp-main.785\",\n pages = \"11416--11429\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.785.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.785/", + "pdf_size": 388962, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4302393897579981658&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "", + "aff_domain": "", + "email": "", + "github": "https://github.com/AlexeySorokin/EditScorer", + "project": "", + "author_num": 1 + }, + { + "id": "2022.emnlp-main.538", + "title": "Improving Aspect Sentiment Quad Prediction via Template-Order Data Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, aspect sentiment quad prediction (ASQP) has become a popular task in the field of aspect-level sentiment analysis. Previous work utilizes a predefined template to paraphrase the original sentence into a structure target sequence, which can be easily decoded as quadruplets of the form (aspect category, aspect term, opinion term, sentiment polarity). The template involves the four elements in a fixed order. 
However, we observe that this solution contradicts with the order-free property of the ASQP task, since there is no need to fix the template order as long as the quadruplet is extracted correctly. Inspired by the observation, we study the effects of template orders and find that some orders help the generative model achieve better performance. It is hypothesized that different orders provide various views of the quadruplet. Therefore, we propose a simple but effective method to identify the most proper orders, and further combine multiple proper templates as data augmentation to improve the ASQP task. Specifically, we use the pre-trained language model to select the orders with minimal entropy. By fine-tuning the pre-trained language model with these template orders, our approach improves the performance of quad prediction, and outperforms state-of-the-art methods significantly in low-resource settings.", + "author": "Mengting Hu; Yike Wu; Hang Gao; Yinhao Bai; Shiwan Zhao", + "authorids": "/m/mengting-hu/; /y/yike-wu/; /h/hang-gao/; /y/yinhao-bai/; /s/shiwan-zhao/", + "bibtex": "@inproceedings{hu-etal-2022-improving-aspect,\n title = \"Improving Aspect Sentiment Quad Prediction via Template-Order Data Augmentation\",\n author = \"Hu, Mengting and\n Wu, Yike and\n Gao, Hang and\n Bai, Yinhao and\n Zhao, Shiwan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.538/\",\n doi = \"10.18653/v1/2022.emnlp-main.538\",\n pages = \"7889--7900\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.538.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.538/", + "pdf_size": 1746793, + "gs_citation": 60, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3871269515457603346&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "College of Software, Nankai University; School of Journalism and Communication, Nankai University; Institute for Public Safety Research, Tsinghua University; College of Software, Nankai University; Independent researcher", + "aff_domain": "nankai.edu.cn;nankai.edu.cn;mail.tsinghua.edu.cn;mail.nankai.edu.cn;gmail.com", + "email": "nankai.edu.cn;nankai.edu.cn;mail.tsinghua.edu.cn;mail.nankai.edu.cn;gmail.com", + "github": "https://github.com/hmt2014/AspectQuad", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Nankai University;Tsinghua University;Independent researcher", + "aff_unique_dep": "College of Software;Institute for Public Safety Research;", + "aff_unique_url": "http://www.nankai.edu.cn;https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "Nankai;Tsinghua;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.findings-emnlp.302", + "title": "Improving Bilingual Lexicon Induction with Cross-Encoder Reranking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Bilingual lexicon induction (BLI) with limited bilingual supervision is a crucial yet challenging task in multilingual NLP. Current state-of-the-art BLI methods rely on the induction of cross-lingual word embeddings (CLWEs) to capture cross-lingual word similarities; such CLWEs are obtained 1) via traditional static models (e.g., VecMap), or 2) by extracting type-level CLWEs from multilingual pretrained language models (mPLMs), or 3) through combining the former two options. In this work, we propose a novel semi-supervised post-hoc reranking method termed BLICEr (BLI with Cross-Encoder Reranking), applicable to any precalculated CLWE space, which improves their BLI capability. 
The key idea is to \u2018extract\u2019 cross-lingual lexical knowledge from mPLMs, and then combine it with the original CLWEs. This crucial step is done via 1) creating a word similarity dataset, comprising positive word pairs (i.e., true translations) and hard negative pairs induced from the original CLWE space, and then 2) fine-tuning an mPLM (e.g., mBERT or XLM-R) in a cross-encoder manner to predict the similarity scores. At inference, we 3) combine the similarity score from the original CLWE space with the score from the BLI-tuned cross-encoder. BLICEr establishes new state-of-the-art results on two standard BLI benchmarks spanning a wide spectrum of diverse languages: it substantially outperforms a series of strong baselines across the board. We also validate the robustness of BLICEr with different CLWEs.", + "author": "Yaoyiran Li; Fangyu Liu; Ivan Vuli\u0107; Anna Korhonen", + "authorids": "/y/yaoyiran-li/; /f/fangyu-liu/; /i/ivan-vulic/; /a/anna-korhonen/", + "bibtex": "@inproceedings{li-etal-2022-improving-bilingual,\n title = \"Improving Bilingual Lexicon Induction with Cross-Encoder Reranking\",\n author = \"Li, Yaoyiran and\n Liu, Fangyu and\n Vuli{\\'c}, Ivan and\n Korhonen, Anna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.302/\",\n doi = \"10.18653/v1/2022.findings-emnlp.302\",\n pages = \"4100--4116\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.302.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.302/", + "pdf_size": 643850, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9600257752620354220&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Language 
Technology Lab, TAL, University of Cambridge; Language Technology Lab, TAL, University of Cambridge; Language Technology Lab, TAL, University of Cambridge; Language Technology Lab, TAL, University of Cambridge", + "aff_domain": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;cam.ac.uk;cam.ac.uk;cam.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Language Technology Lab, TAL", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.287", + "title": "Improving Chinese Spelling Check by Character Pronunciation Prediction: The Effects of Adaptivity and Granularity", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Chinese spelling check (CSC) is a fundamental NLP task that detects and corrects spelling errors in Chinese texts. As most of these spelling errors are caused by phonetic similarity, effectively modeling the pronunciation of Chinese characters is a key factor for CSC. In this paper, we consider introducing an auxiliary task of Chinese pronunciation prediction (CPP) to improve CSC, and, for the first time, systematically discuss the adaptivity and granularity of this auxiliary task. We propose SCOPE which builds upon a shared encoder two parallel decoders, one for the primary CSC task and the other for a fine-grained auxiliary CPP task, with a novel adaptive weighting scheme to balance the two tasks. In addition, we design a delicate iterative correction strategy for further improvements during inference. Empirical evaluation shows that SCOPE achieves new state-of-the-art on three CSC benchmarks, demonstrating the effectiveness and superiority of the auxiliary CPP task. 
Comprehensive ablation studies further verify the positive effects of adaptivity and granularity of the task.", + "author": "Jiahao Li; Quan Wang; Zhendong Mao; Junbo Guo; Yanyan Yang; Yongdong Zhang", + "authorids": "/j/jiahao-li/; /q/quan-wang/; /z/zhendong-mao/; /j/junbo-guo/; /y/yanyan-yang/; /y/yongdong-zhang/", + "bibtex": "@inproceedings{li-etal-2022-improving-chinese,\n title = \"Improving {C}hinese Spelling Check by Character Pronunciation Prediction: The Effects of Adaptivity and Granularity\",\n author = \"Li, Jiahao and\n Wang, Quan and\n Mao, Zhendong and\n Guo, Junbo and\n Yang, Yanyan and\n Zhang, Yongdong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.287/\",\n doi = \"10.18653/v1/2022.emnlp-main.287\",\n pages = \"4275--4286\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.287.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.287/", + "pdf_size": 924584, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9204840179422255331&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/jiahaozhenbang/SCOPE", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.10", + "title": "Improving Complex Knowledge Base Question Answering via Question-to-Action and Question-to-Question Alignment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Complex knowledge base question answering can be achieved by converting questions into sequences of predefined actions. 
However, there is a significant semantic and structural gap between natural language and action sequences, which makes this conversion difficult. In this paper, we introduce an alignment-enhanced complex question answering framework, called ALCQA, which mitigates this gap through question-to-action alignment and question-to-question alignment. We train a question rewriting model to align the question and each action, and utilize a pretrained language model to implicitly align the question and KG artifacts. Moreover, considering that similar questions correspond to similar action sequences, we retrieve top-k similar question-answer pairs at the inference stage through question-to-question alignment and propose a novel reward-guided action sequence selection strategy to select from candidate action sequences. We conduct experiments on CQA and WQSP datasets, and the results show that our approach outperforms state-of-the-art methods and obtains a 9.88% improvements in the F1 metric on CQA dataset. 
Our source code is available at https://github.com/TTTTTTTTy/ALCQA.", + "author": "Yechun Tang; Xiaoxia Cheng; Weiming Lu", + "authorids": "/y/yechun-tang/; /x/xiaoxia-cheng/; /w/weiming-lu/", + "bibtex": "@inproceedings{tang-etal-2022-improving,\n title = \"Improving Complex Knowledge Base Question Answering via Question-to-Action and Question-to-Question Alignment\",\n author = \"Tang, Yechun and\n Cheng, Xiaoxia and\n Lu, Weiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.10/\",\n doi = \"10.18653/v1/2022.emnlp-main.10\",\n pages = \"137--147\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.10.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.10/", + "pdf_size": 507468, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8442309287253044239&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "https://github.com/TTTTTTTTy/ALCQA", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "College of Computer Science and Technology", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.776", + "title": "Improving 
Embeddings Representations for Comparing Higher Education Curricula: A Use Case in Computing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose an approach for comparing curricula of study programs in higher education. Pre-trained word embeddings are fine-tuned in a study program classification task, where each curriculum is represented by the names and content of its courses. By combining metric learning with a novel course-guided attention mechanism, our method obtains more accurate curriculum representations than strong baselines. Experiments on a new dataset with curricula of computing programs demonstrate the intuitive power of our approach via attention weights, topic modeling, and embeddings visualizations. We also present a use case comparing computing curricula from USA and Latin America to showcase the capabilities of our improved embeddings representations.", + "author": "Jeffri Murrugarra-Llerena; Fernando Alva-Manchego; Nils Murrugarra-LLerena", + "authorids": "/j/jeffri-murrugarra-llerena/; /f/fernando-alva-manchego/; /n/nils-murrugarra-llerena/", + "bibtex": "@inproceedings{murrugarra-llerena-etal-2022-improving,\n title = \"Improving Embeddings Representations for Comparing Higher Education Curricula: A Use Case in Computing\",\n author = \"Murrugarra-Llerena, Jeffri and\n Alva-Manchego, Fernando and\n Murrugarra-LLerena, Nils\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.776/\",\n doi = \"10.18653/v1/2022.emnlp-main.776\",\n pages = \"11299--11307\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.776.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.776/", + "pdf_size": 
4302678, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15989121734338047843&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Institute of Informatics, Federal University of Rio Grande do Sul (UFRGS), Brasil; School of Computer Science and Informatics, Cardiff University, UK; School of Computing, Weber State University, USA", + "aff_domain": "inf.ufrgs.br;cardiff.ac.uk;weber.edu", + "email": "inf.ufrgs.br;cardiff.ac.uk;weber.edu", + "github": "https://github.com/Artcs1/DL_curriculas", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Federal University of Rio Grande do Sul;Cardiff University;Weber State University", + "aff_unique_dep": "Institute of Informatics;School of Computer Science and Informatics;School of Computing", + "aff_unique_url": "https://www.ufrgs.br;https://www.cardiff.ac.uk;https://www.weber.edu", + "aff_unique_abbr": "UFRGS;Cardiff;WSU", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Rio Grande do Sul;Cardiff;", + "aff_country_unique_index": "0;1;2", + "aff_country_unique": "Brasil;United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.238", + "title": "Improving English-Arabic Transliteration with Phonemic Memories", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transliteration is an important task in natural language processing (NLP) which aims to convert a name in the source language to the target language without changing its pronunciation. Particularly, transliteration from English to Arabic is highly needed in many applications, especially in countries (e.g., United Arab Emirates (UAE)) whose most citizens are foreigners but the official language is Arabic. In such a task-oriented scenario, namely transliterating the English names to the corresponding Arabic ones, the performance of the transliteration model is highly important. 
However, most existing neural approaches mainly apply a universal transliteration model with advanced encoders and decoders to the task, where limited attention is paid to leveraging the phonemic association between English and Arabic to further improve model performance. In this paper, we focus on transliteration of people\u2019s names from English to Arabic for the general public. In doing so, we collect a corpus named EANames by extracting high quality name pairs from online resources which better represent the names in the general public than linked Wikipedia entries that are always names of famous people). We propose a model for English-Arabic transliteration, where a memory module modeling the phonemic association between English and Arabic is used to guide the transliteration process. We run experiments on the collected data and the results demonstrate the effectiveness of our approach for English-Arabic transliteration.", + "author": "Yuanhe Tian; Renze Lou; Xiangyu Pang; Lianxi Wang; Shengyi Jiang; Yan Song", + "authorids": "/y/yuanhe-tian/; /r/renze-lou/; /x/xiangyu-pang/; /l/lianxi-wang/; /s/shengyi-jiang/; /y/yan-song/", + "bibtex": "@inproceedings{tian-etal-2022-improving-english,\n title = \"Improving {E}nglish-{A}rabic Transliteration with Phonemic Memories\",\n author = \"Tian, Yuanhe and\n Lou, Renze and\n Pang, Xiangyu and\n Wang, Lianxi and\n Jiang, Shengyi and\n Song, Yan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.238/\",\n doi = \"10.18653/v1/2022.findings-emnlp.238\",\n pages = \"3262--3272\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.238.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.238/", + 
"pdf_size": 375273, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12287641051703091810&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of Science and Technology of China+University of Washington; Temple University; China Merchants Securities; Guangdong University of Foreign Studies; Guangdong University of Foreign Studies; University of Science and Technology of China", + "aff_domain": "uw.edu;temple.edu;foxmail.com;gdufs.edu.cn;163.com;gmail.com", + "email": "uw.edu;temple.edu;foxmail.com;gdufs.edu.cn;163.com;gmail.com", + "github": "https://github.com/synlp/EATrans", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;3;4;4;0", + "aff_unique_norm": "University of Science and Technology of China;University of Washington;Temple University;China Merchants Securities Co., Ltd.;Guangdong University of Foreign Studies", + "aff_unique_dep": ";;;;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.washington.edu;https://www.temple.edu;http://www.cms.com.cn;http://www.gdufs.edu.cn", + "aff_unique_abbr": "USTC;UW;Temple;CMS;GDUFS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.454", + "title": "Improving Event Coreference Resolution Using Document-level and Topic-level Information", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Event coreference resolution (ECR) aims to cluster event mentions that refer to the same real-world events. Deep learning methods have achieved SOTA results on the ECR task. However, due to the encoding length limitation, previous methods either adopt classical pairwise models based on sentence-level context or split each document into multiple chunks and encode them separately. They failed to capture the interactions and contextual cues among those long-distance event mentions. 
Besides, high-level information, such as event topics, is rarely considered to enhance representation learning for ECR. To address the above two issues, we first apply a Longformer-based encoder to obtain the document-level embeddings and an encoder with a trigger-mask mechanism to learn sentence-level embeddings based on local context. In addition, we propose an event topic generator to infer the latent topic-level representations. Finally, using the above event embeddings, we employ a multiple tensor matching method to capture their interactions at the document, sentence, and topic levels. Experimental results on the KBP 2017 dataset show that our model outperforms the SOTA baselines.", + "author": "Sheng Xu; Peifeng Li; Qiaoming Zhu", + "authorids": "/s/sheng-xu/; /p/peifeng-li/; /q/qiaoming-zhu/", + "bibtex": "@inproceedings{xu-etal-2022-improving,\n title = \"Improving Event Coreference Resolution Using Document-level and Topic-level Information\",\n author = \"Xu, Sheng and\n Li, Peifeng and\n Zhu, Qiaoming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.454/\",\n doi = \"10.18653/v1/2022.emnlp-main.454\",\n pages = \"6765--6775\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.454.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.454/", + "pdf_size": 599495, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=946607956603448388&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "School of Computer Science and Technology, Soochow University; School of Computer Science and Technology, Soochow University; School of Computer Science and Technology, Soochow University", + 
"aff_domain": "stu.suda.edu.cn;suda.edu.cn;suda.edu.cn", + "email": "stu.suda.edu.cn;suda.edu.cn;suda.edu.cn", + "github": "https://github.com/jsksxs360/event-coref-emnlp2022", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Soochow University", + "aff_unique_dep": "School of Computer Science and Technology", + "aff_unique_url": "https://eng.suda.edu.cn/", + "aff_unique_abbr": "Soochow U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.623", + "title": "Improving Factual Consistency in Summarization with Compression-Based Post-Editing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "State-of-the-art summarization models still struggle to be factually consistent with the input text. A model-agnostic way to address this problem is post-editing the generated summaries. However, existing approaches typically fail to remove entity errors if a suitable input entity replacement is not available or may insert erroneous content. In our work, we focus on removing extrinsic entity errors, or entities not in the source, to improve consistency while retaining the summary\u2019s essential information and form. We propose to use sentence-compression data to train the post-editing model to take a summary with extrinsic entity errors marked with special tokens and output a compressed, well-formed summary with those errors removed. We show that this model improves factual consistency while maintaining ROUGE, improving entity precision by up to 30% on XSum, and that this model can be applied on top of another post-editor, improving entity precision by up to a total of 38%. 
We perform an extensive comparison of post-editing approaches that demonstrate trade-offs between factual consistency, informativeness, and grammaticality, and we analyze settings where post-editors show the largest improvements.", + "author": "Alex Fabbri; Prafulla Kumar Choubey; Jesse Vig; Chien-Sheng Wu; Caiming Xiong", + "authorids": "/a/alex-fabbri/; /p/prafulla-kumar-choubey/; /j/jesse-vig/; /c/chien-sheng-wu/; /c/caiming-xiong/", + "bibtex": "@inproceedings{fabbri-etal-2022-improving,\n title = \"Improving Factual Consistency in Summarization with Compression-Based Post-Editing\",\n author = \"Fabbri, Alex and\n Choubey, Prafulla Kumar and\n Vig, Jesse and\n Wu, Chien-Sheng and\n Xiong, Caiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.623/\",\n doi = \"10.18653/v1/2022.emnlp-main.623\",\n pages = \"9149--9156\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.623.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.623/", + "pdf_size": 358939, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5454259418792986335&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Salesforce AI Research; Salesforce AI Research; Salesforce AI Research; Salesforce AI Research; Salesforce AI Research", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Salesforce", + "aff_unique_dep": "Salesforce AI Research", + "aff_unique_url": 
"https://www.salesforce.com", + "aff_unique_abbr": "Salesforce AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.816", + "title": "Improving Faithfulness by Augmenting Negative Summaries from Fake Documents", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current abstractive summarization systems tend to hallucinate content that is unfaithful to the source document, posing a risk of misinformation. To mitigate hallucination, we must teach the model to distinguish hallucinated summaries from faithful ones. However, the commonly used maximum likelihood training does not disentangle factual errors from other model errors. To address this issue,we propose a back-translation-style approach to augment negative samples that mimic factual errors made by the model. Specifically, we train an elaboration model that generates hallucinated documents given the reference summaries, and then generates negative summaries from the fake documents. We incorporate the negative samples into training through a controlled generator, which produces faithful/unfaithful summaries conditioned on the control codes. Additionally, we find that adding textual entailment data through multitasking further boosts the performance. 
Experiments on three datasets (XSum, Gigaword, and WikiHow) show that our method consistently improves faithfulness without sacrificing informativeness according to both human and automatic evaluation", + "author": "Tianshu Wang; Faisal Ladhak; Esin Durmus; He He", + "authorids": "/t/tianshu-wang/; /f/faisal-ladhak/; /e/esin-durmus/; /h/he-he/", + "bibtex": "@inproceedings{wang-etal-2022-improving,\n title = \"Improving Faithfulness by Augmenting Negative Summaries from Fake Documents\",\n author = \"Wang, Tianshu and\n Ladhak, Faisal and\n Durmus, Esin and\n He, He\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.816/\",\n doi = \"10.18653/v1/2022.emnlp-main.816\",\n pages = \"11913--11921\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.816.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.816/", + "pdf_size": 512332, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16505781922800190193&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/COFE2022/CoFE", + "project": "", + "author_num": 4 + }, + { + "id": "2022.findings-emnlp.506", + "title": "Improving Few-Shot Domain Transfer for Named Entity Disambiguation with Pattern Exploitation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Named entity disambiguation (NED) is a critical subtask of entity linking, which seeks to connect knowledge base entities with textual mentions of those entities. 
Naturally, the performance of a model depends on the domain it was trained on; thus, reducing the amount of data required to train models is advantageous. In this work, we leverage recent research on pattern exploitation for NED and explore whether it can reduce the amount of data required for domain adaptation by reformulating the disambiguation task as a masked language modeling problem. Using ADAPET (Tam et al., 2021), which implements a new approach for few-shot learning using fine-tuned transformer-based language models, we produce an NED model which yields, without any sacrifice of in-domain accuracy, a 7% improvement in zero-shot cross-domain performance as evaluated on NEDMed, a new NED dataset of mental health news which we release with this work.", + "author": "Philip Blair; Kfir Bar", + "authorids": "/p/philip-blair/; /k/kfir-bar/", + "bibtex": "@inproceedings{blair-bar-2022-improving,\n title = \"Improving Few-Shot Domain Transfer for Named Entity Disambiguation with Pattern Exploitation\",\n author = \"Blair, Philip and\n Bar, Kfir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.506/\",\n doi = \"10.18653/v1/2022.findings-emnlp.506\",\n pages = \"6797--6810\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.506.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.506/", + "pdf_size": 304606, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=253051405403342722&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.findings-emnlp.363", + "title": "Improving 
Generalization of Pre-trained Language Models via Stochastic Weight Averaging", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge Distillation (KD) is a commonly used technique for improving the generalization of compact Pre-trained Language Models (PLMs) on downstream tasks. However, such methods impose the additional burden of training a separate teacher model for every new dataset.Alternatively, one may directly work on the improvement of the optimization procedure of the compact model towards better generalization. Recent works observe that the flatness of the local minimum correlates well with better generalization.In this work, we adapt Stochastic Weight Averaging (SWA), a method encouraging convergence to a flatter minimum, to fine-tuning PLMs. We conduct extensive experiments on various NLP tasks (text classification, question answering, and generation) and different model architectures and demonstrate that our adaptation improves the generalization without extra computation cost. 
Moreover, we observe that this simple optimization technique is able to outperform the state-of-the-art KD methods for compact models.", + "author": "Peng Lu; Ivan Kobyzev; Mehdi Rezagholizadeh; Ahmad Rashid; Ali Ghodsi; Phillippe Langlais", + "authorids": "/p/peng-lu/; /i/ivan-kobyzev/; /m/mehdi-rezagholizadeh/; /a/ahmad-rashid/; /a/ali-ghodsi/; /p/philippe-langlais/", + "bibtex": "@inproceedings{lu-etal-2022-improving,\n title = \"Improving Generalization of Pre-trained Language Models via Stochastic Weight Averaging\",\n author = \"Lu, Peng and\n Kobyzev, Ivan and\n Rezagholizadeh, Mehdi and\n Rashid, Ahmad and\n Ghodsi, Ali and\n Langlais, Phillippe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.363/\",\n doi = \"10.18653/v1/2022.findings-emnlp.363\",\n pages = \"4948--4954\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.363.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.363/", + "pdf_size": 216745, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12459644080436116791&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab+RALI/DIRO, Universit\u00e9 de Montr\u00e9al, Canada; Department of Statistics and Actuarial Science, University of Waterloo; RALI/DIRO, Universit\u00e9 de Montr\u00e9al, Canada", + "aff_domain": "huawei.com;huawei.com;huawei.com;huawei.com;uwaterloo.ca;iro.umontreal.ca", + "email": "huawei.com;huawei.com;huawei.com;huawei.com;uwaterloo.ca;iro.umontreal.ca", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0+1;2;1", + 
"aff_unique_norm": "Huawei;Universit\u00e9 de Montr\u00e9al;University of Waterloo", + "aff_unique_dep": "Noah\u2019s Ark Lab;RALI/DIRO;Department of Statistics and Actuarial Science", + "aff_unique_url": "https://www.huawei.com;https://www.umontreal.ca;https://uwaterloo.ca", + "aff_unique_abbr": "Huawei;UdeM;UWaterloo", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Montr\u00e9al", + "aff_country_unique_index": "0;0;0;0+1;1;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.331", + "title": "Improving HowNet-Based Chinese Word Sense Disambiguation with Translations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word sense disambiguation (WSD) is the task of identifying the intended sense of a word in context. While prior work on unsupervised WSD has leveraged lexical knowledge bases, such as WordNet and BabelNet, these resources have proven to be less effective for Chinese. Instead, the most widely used lexical knowledge base for Chinese is HowNet. Previous HowNet-based WSD methods have not exploited contextual translation information. In this paper, we present the first HowNet-based WSD system which combines monolingual contextual information from a pretrained neural language model with bilingual information obtained via machine translation and sense translation information from HowNet. 
The results of our evaluation experiment on a test set from prior work demonstrate that our new method achieves a new state of the art for unsupervised Chinese WSD.", + "author": "Xiang Zhang; Bradley Hauer; Grzegorz Kondrak", + "authorids": "/x/xiang-zhang/; /b/bradley-hauer/; /g/grzegorz-kondrak/", + "bibtex": "@inproceedings{zhang-etal-2022-improving-hownet,\n title = \"Improving {H}ow{N}et-Based {C}hinese Word Sense Disambiguation with Translations\",\n author = \"Zhang, Xiang and\n Hauer, Bradley and\n Kondrak, Grzegorz\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.331/\",\n doi = \"10.18653/v1/2022.findings-emnlp.331\",\n pages = \"4530--4536\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.331.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.331/", + "pdf_size": 309081, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9698287622779407820&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Alberta Machine Intelligence Institute, Department of Computing Science, University of Alberta, Edmonton, Canada; Alberta Machine Intelligence Institute, Department of Computing Science, University of Alberta, Edmonton, Canada; Alberta Machine Intelligence Institute, Department of Computing Science, University of Alberta, Edmonton, Canada", + "aff_domain": "ualberta.ca;ualberta.ca;ualberta.ca", + "email": "ualberta.ca;ualberta.ca;ualberta.ca", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Alberta", + "aff_unique_dep": "Department of Computing Science", + "aff_unique_url": "https://www.ualberta.ca", + 
"aff_unique_abbr": "UAlberta", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Edmonton", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.emnlp-main.678", + "title": "Improving Iterative Text Revision by Learning Where to Edit from Other Revision Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Iterative text revision improves text quality by fixing grammatical errors, rephrasing for better readability or contextual appropriateness, or reorganizing sentence structures throughout a document.Most recent research has focused on understanding and classifying different types of edits in the iterative revision process from human-written text instead of building accurate and robust systems for iterative text revision.In this work, we aim to build an end-to-end text revision system that can iteratively generate helpful edits by explicitly detecting editable spans (where-to-edit) with their corresponding edit intents and then instructing a revision model to revise the detected edit spans.Leveraging datasets from other related text editing NLP tasks, combined with the specification of editable spans, leads our system to more accurately model the process of iterative text refinement, as evidenced by empirical results and human evaluations.Our system significantly outperforms previous baselines on our text revision tasks and other standard text revision tasks, including grammatical error correction, text simplification, sentence fusion, and style transfer.Through extensive qualitative and quantitative analysis, we make vital connections between edit intentions and writing quality, and better computational modeling of iterative text revisions.", + "author": "Zae Myung Kim; Wanyu Du; Vipul Raheja; Dhruv Kumar; Dongyeop Kang", + "authorids": "/z/zae-myung-kim/; /w/wanyu-du/; /v/vipul-raheja/; /d/dhruv-kumar/; /d/dongyeop-kang/", + "bibtex": "@inproceedings{kim-etal-2022-improving,\n title 
= \"Improving Iterative Text Revision by Learning Where to Edit from Other Revision Tasks\",\n author = \"Kim, Zae Myung and\n Du, Wanyu and\n Raheja, Vipul and\n Kumar, Dhruv and\n Kang, Dongyeop\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.678/\",\n doi = \"10.18653/v1/2022.emnlp-main.678\",\n pages = \"9986--9999\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.678.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.678/", + "pdf_size": 351496, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9159218557619183703&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Minnesota+Grammarly; University of Virginia; Grammarly; Grammarly; University of Minnesota", + "aff_domain": "umn.edu;virginia.edu;grammarly.com;grammarly.com;umn.edu", + "email": "umn.edu;virginia.edu;grammarly.com;grammarly.com;umn.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;1;1;0", + "aff_unique_norm": "University of Minnesota;Grammarly;University of Virginia", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.minnesota.edu;https://www.grammarly.com;https://www.virginia.edu", + "aff_unique_abbr": "UMN;Grammarly;UVA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.37", + "title": "Improving Large-Scale Conversational Assistants using Model Interpretation based Training Sample Selection", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "This paper presents an approach to identify samples 
from live traffic where the customer implicitly communicated satisfaction with Alexa\u2019s responses, by leveraging interpretations of model behavior. Such customer signals are noisy and adding a large number of samples from live traffic to training set makes re-training infeasible. Our work addresses these challenges by identifying a small number of samples that grow training set by ~0.05% while producing statistically significant improvements in both offline and online tests.", + "author": "Stefan Schroedl; Manoj Kumar; Kiana Hajebi; Morteza Ziyadi; Sriram Venkatapathy; Anil Ramakrishna; Rahul Gupta; Pradeep Natarajan", + "authorids": "/s/stefan-schroedl/; /m/manoj-kumar/; /k/kiana-hajebi/; /m/morteza-ziyadi/; /s/sriram-venkatapathy/; /a/anil-ramakrishna/; /r/rahul-gupta/; /p/pradeep-natarajan/", + "bibtex": "@inproceedings{schroedl-etal-2022-improving,\n title = \"Improving Large-Scale Conversational Assistants using Model Interpretation based Training Sample Selection\",\n author = \"Schroedl, Stefan and\n Kumar, Manoj and\n Hajebi, Kiana and\n Ziyadi, Morteza and\n Venkatapathy, Sriram and\n Ramakrishna, Anil and\n Gupta, Rahul and\n Natarajan, Pradeep\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.37/\",\n doi = \"10.18653/v1/2022.emnlp-industry.37\",\n pages = \"371--378\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.37.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.37/", + "pdf_size": 881479, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5893251627694532871&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Amazon Alexa AI, USA; Amazon Alexa AI, USA; Amazon Alexa AI, 
USA; Amazon Alexa AI, USA; Amazon Alexa AI, USA; Amazon Alexa AI, USA; Amazon Alexa AI, USA; Amazon Alexa AI, USA", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon Alexa AI", + "aff_unique_dep": "Amazon Alexa AI", + "aff_unique_url": "https://www.amazon.com/alexa", + "aff_unique_abbr": "Amazon Alexa AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.631", + "title": "Improving Large-scale Paraphrase Acquisition and Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper addresses the quality issues in existing Twitter-based paraphrase datasets, and discusses the necessity of using two separate definitions of paraphrase for identification and generation tasks. We present a new Multi-Topic Paraphrase in Twitter (MultiPIT) corpus that consists of a total of 130k sentence pairs with crowdsoursing (MultiPIT_crowd) and expert (MultiPIT_expert) annotations using two different paraphrase definitions for paraphrase identification, in addition to a multi-reference test set (MultiPIT_NMR) and a large automatically constructed training set (MultiPIT_Auto) for paraphrase generation. With improved data annotation quality and task-specific paraphrase definition, the best pre-trained language model fine-tuned on our dataset achieves the state-of-the-art performance of 84.2 F1 for automatic paraphrase identification. 
Furthermore, our empirical results also demonstrate that the paraphrase generation models trained on MultiPIT_Auto generate more diverse and high-quality paraphrases compared to their counterparts fine-tuned on other corpora such as Quora, MSCOCO, and ParaNMT.", + "author": "Yao Dou; Chao Jiang; Wei Xu", + "authorids": "/y/yao-dou/; /c/chao-jiang/; /w/wei-xu/", + "bibtex": "@inproceedings{dou-etal-2022-improving,\n title = \"Improving Large-scale Paraphrase Acquisition and Generation\",\n author = \"Dou, Yao and\n Jiang, Chao and\n Xu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.631/\",\n doi = \"10.18653/v1/2022.emnlp-main.631\",\n pages = \"9301--9323\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.631.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.631/", + "pdf_size": 6472939, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=338124473211023075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology", + "aff_domain": "gatech.edu;gatech.edu;cc.gatech.edu", + "email": "gatech.edu;gatech.edu;cc.gatech.edu", + "github": "", + "project": "http://twitter-paraphrase.com/", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "School of Interactive Computing", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": 
"Atlanta", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.822", + "title": "Improving Low-Resource Languages in Pre-Trained Multilingual Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained multilingual language models are the foundation of many NLP approaches, including cross-lingual transfer solutions. However, languages with small available monolingual corpora are often not well-supported by these models leading to poor performance. We propose an unsupervised approach to improve the cross-lingual representations of low-resource languages by bootstrapping word translation pairs from monolingual corpora and using them to improve language alignment in pre-trained language models. We perform experiments on nine languages, using contextual word retrieval and zero-shot named entity recognition to measure both intrinsic cross-lingual word representation quality and downstream task performance, showing improvements on both tasks. 
Our results show that it is possible to improve pre-trained multilingual language models by relying only on non-parallel resources.", + "author": "Viktor Hangya; Hossain Shaikh Saadi; Alexander Fraser", + "authorids": "/v/viktor-hangya/; /h/hossain-shaikh-saadi/; /a/alexander-fraser/", + "bibtex": "@inproceedings{hangya-etal-2022-improving,\n title = \"Improving Low-Resource Languages in Pre-Trained Multilingual Language Models\",\n author = \"Hangya, Viktor and\n Saadi, Hossain Shaikh and\n Fraser, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.822/\",\n doi = \"10.18653/v1/2022.emnlp-main.822\",\n pages = \"11993--12006\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.822.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.822/", + "pdf_size": 482483, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2357712263503349444&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Center for Information and Language Processing, LMU Munich, Germany + Munich Center for Machine Learning, Germany; Technical University of Munich, Germany; Center for Information and Language Processing, LMU Munich, Germany + Munich Center for Machine Learning, Germany", + "aff_domain": "cis.lmu.de;tum.de;cis.lmu.de", + "email": "cis.lmu.de;tum.de;cis.lmu.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;0+1", + "aff_unique_norm": "LMU Munich;Munich Center for Machine Learning;Technical University of Munich", + "aff_unique_dep": "Center for Information and Language Processing;;", + "aff_unique_url": "https://www.lmu.de;;https://www.tum.de", + "aff_unique_abbr": 
"LMU;;TUM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Munich;", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.361", + "title": "Improving Machine Translation with Phrase Pair Injection and Corpus Filtering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we show that the combination of Phrase Pair Injection and Corpus Filtering boosts the performance of Neural Machine Translation (NMT) systems. We extract parallel phrases and sentences from the pseudo-parallel corpus and augment it with the parallel corpus to train the NMT models. With the proposed approach, we observe an improvement in the Machine Translation (MT) system for 3 low-resource language pairs, Hindi-Marathi, English-Marathi, and English-Pashto, and 6 translation directions by up to 2.7 BLEU points, on the FLORES test data. These BLEU score improvements are over the models trained using the whole pseudo-parallel corpus augmented with the parallel corpus.", + "author": "Akshay Batheja; Pushpak Bhattacharyya", + "authorids": "/a/akshay-batheja/; /p/pushpak-bhattacharyya/", + "bibtex": "@inproceedings{batheja-bhattacharyya-2022-improving,\n title = \"Improving Machine Translation with Phrase Pair Injection and Corpus Filtering\",\n author = \"Batheja, Akshay and\n Bhattacharyya, Pushpak\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.361/\",\n doi = \"10.18653/v1/2022.emnlp-main.361\",\n pages = \"5395--5400\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.361.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.361/", + "pdf_size": 175695, + 
"gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5935347649767028424&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "CFILT, Indian Institute of Technology Bombay; CFILT, Indian Institute of Technology Bombay", + "aff_domain": "cse.iitb.ac.in;cse.iitb.ac.in", + "email": "cse.iitb.ac.in;cse.iitb.ac.in", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Indian Institute of Technology Bombay", + "aff_unique_dep": "CFILT", + "aff_unique_url": "https://www.iitb.ac.in", + "aff_unique_abbr": "IIT Bombay", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Bombay", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.193", + "title": "Improving Multi-task Stance Detection with Multi-task Interaction Network", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Stance detection aims to identify people\u2019s standpoints expressed in the text towards a target, which can provide powerful information for various downstream tasks.Recent studies have proposed multi-task learning models that introduce sentiment information to boost stance detection.However, they neglect to explore capturing the fine-grained task-specific interaction between stance detection and sentiment tasks, thus degrading performance.To address this issue, this paper proposes a novel multi-task interaction network (MTIN) for improving the performance of stance detection and sentiment analysis tasks simultaneously.Specifically, we construct heterogeneous task-related graphs to automatically identify and adapt the roles that a word plays with respect to a specific task. 
Also, a multi-task interaction module is designed to capture the word-level interaction between tasks, so as to obtain richer task representations.Extensive experiments on two real-world datasets show that our proposed approach outperforms state-of-the-art methods in both stance detection and sentiment analysis tasks.", + "author": "Heyan Chai; Siyu Tang; Jinhao Cui; Ye Ding; Binxing Fang; Qing Liao", + "authorids": "/h/heyan-chai/; /s/siyu-tang/; /j/jinhao-cui/; /y/ye-ding/; /b/binxing-fang/; /q/qing-liao/", + "bibtex": "@inproceedings{chai-etal-2022-improving,\n title = \"Improving Multi-task Stance Detection with Multi-task Interaction Network\",\n author = \"Chai, Heyan and\n Tang, Siyu and\n Cui, Jinhao and\n Ding, Ye and\n Fang, Binxing and\n Liao, Qing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.193/\",\n doi = \"10.18653/v1/2022.emnlp-main.193\",\n pages = \"2990--3000\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.193.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.193/", + "pdf_size": 1001661, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4121998180906158546&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Harbin Institute of Technology, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China; Dongguan University of Technology, China; Harbin Institute of Technology, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China + Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": 
"stu.hit.edu.cn;stu.hit.edu.cn;163.com;dgut.edu.cn;cae.cn;hit.edu.cn", + "email": "stu.hit.edu.cn;stu.hit.edu.cn;163.com;dgut.edu.cn;cae.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0+2;0+2", + "aff_unique_norm": "Harbin Institute of Technology;Dongguan University of Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";;", + "aff_unique_url": "http://en.hhit.edu.cn/;http://www.dgut.edu.cn;", + "aff_unique_abbr": "HIT;;", + "aff_campus_unique_index": "0;0;0;0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.195", + "title": "Improving Multi-turn Emotional Support Dialogue Generation with Lookahead Strategy Planning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Providing Emotional Support (ES) to soothe people in emotional distress is an essential capability in social interactions. Most existing researches on building ES conversation systems only considered single-turn interactions with users, which was over-simplified. In comparison, multi-turn ES conversation systems can provide ES more effectively, but face several new technical challenges, including: (1) how to adopt appropriate support strategies to achieve the long-term dialogue goal of comforting the user\u2019s emotion; (2) how to dynamically model the user\u2019s state. In this paper, we propose a novel system MultiESC to address these issues. For strategy planning, drawing inspiration from the A* search algorithm, we propose lookahead heuristics to estimate the future user feedback after using particular strategies, which helps to select strategies that can lead to the best long-term effects. For user state modeling, MultiESC focuses on capturing users\u2019 subtle emotional expressions and understanding their emotion causes. 
Extensive experiments show that MultiESC significantly outperforms competitive baselines in both dialogue generation and strategy planning.", + "author": "Yi Cheng; Wenge Liu; Wenjie Li; Jiashuo Wang; Ruihui Zhao; Bang Liu; Xiaodan Liang; Yefeng Zheng", + "authorids": "/y/yi-cheng/; /w/wenge-liu/; /w/wenjie-li/; /j/jiashuo-wang/; /r/ruihui-zhao/; /b/bang-liu/; /x/xiaodan-liang/; /y/yefeng-zheng/", + "bibtex": "@inproceedings{cheng-etal-2022-improving,\n title = \"Improving Multi-turn Emotional Support Dialogue Generation with Lookahead Strategy Planning\",\n author = \"Cheng, Yi and\n Liu, Wenge and\n Li, Wenjie and\n Wang, Jiashuo and\n Zhao, Ruihui and\n Liu, Bang and\n Liang, Xiaodan and\n Zheng, Yefeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.195/\",\n doi = \"10.18653/v1/2022.emnlp-main.195\",\n pages = \"3014--3026\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.195.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.195/", + "pdf_size": 686086, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=122734064415589423&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 7, + "aff": "Hong Kong Polytechnic University; Baidu Inc., Beijing, China; Hong Kong Polytechnic University; Hong Kong Polytechnic University; Tencent Jarvis Lab; RALI & Mila, Universit\u00e9 de Montr\u00e9al; Sun Yat-sen University; Tencent Jarvis Lab", + "aff_domain": "comp.polyu.edu.hk;gmail.com;comp.polyu.edu.hk;comp.polyu.edu.hk;tencent.com;umontreal.ca;gmail.com;tencent.com", + "email": "comp.polyu.edu.hk;gmail.com;comp.polyu.edu.hk;comp.polyu.edu.hk;tencent.com;umontreal.ca;gmail.com;tencent.com", + "github": 
"https://github.com/lwgkzl/MultiESC", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;0;0;2;3;4;2", + "aff_unique_norm": "Hong Kong Polytechnic University;Baidu Inc.;Tencent;Universit\u00e9 de Montr\u00e9al;Sun Yat-sen University", + "aff_unique_dep": ";;Jarvis Lab;RALI & Mila;", + "aff_unique_url": "https://www.polyu.edu.hk;https://www.baidu.com;https://www.tencent.com;https://www.umontreal.ca;http://www.sysu.edu.cn/", + "aff_unique_abbr": "PolyU;Baidu;Tencent;UdeM;SYSU", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Beijing;Montr\u00e9al", + "aff_country_unique_index": "0;0;0;0;0;1;0;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.emnlp-main.249", + "title": "Improving Passage Retrieval with Zero-Shot Question Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a simple and effective re-ranking method for improving passage retrieval in open question answering. The re-ranker re-scores retrieved passages with a zero-shot question generation model, which uses a pre-trained language model to compute the probability of the input question conditioned on a retrieved passage. This approach can be applied on top of any retrieval method (e.g. neural or keyword-based), does not require any domain- or task-specific training (and therefore is expected to generalize better to data distribution shifts), and provides rich cross-attention between query and passage (i.e. it must explain every token in the question). When evaluated on a number of open-domain retrieval datasets, our re-ranker improves strong unsupervised retrieval models by 6%-18% absolute and strong supervised models by up to 12% in terms of top-20 passage retrieval accuracy. 
We also obtain new state-of-the-art results on full open-domain question answering by simply adding the new re-ranker to existing models with no further changes.", + "author": "Devendra Sachan; Mike Lewis; Mandar Joshi; Armen Aghajanyan; Wen-tau Yih; Joelle Pineau; Luke Zettlemoyer", + "authorids": "/d/devendra-sachan/; /m/mike-lewis/; /m/mandar-joshi/; /a/armen-aghajanyan/; /w/wen-tau-yih/; /j/joelle-pineau/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{sachan-etal-2022-improving,\n title = \"Improving Passage Retrieval with Zero-Shot Question Generation\",\n author = \"Sachan, Devendra and\n Lewis, Mike and\n Joshi, Mandar and\n Aghajanyan, Armen and\n Yih, Wen-tau and\n Pineau, Joelle and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.249/\",\n doi = \"10.18653/v1/2022.emnlp-main.249\",\n pages = \"3781--3797\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.249.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.249/", + "pdf_size": 505413, + "gs_citation": 157, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14637823715709284667&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "McGill University+Mila - Quebec AI Institute; Meta AI Research; Google Research; Meta AI Research; Meta AI Research; McGill University+Mila - Quebec AI Institute+Meta AI Research; University of Washington", + "aff_domain": "mila.quebec;meta.com;google.com;meta.com;meta.com;meta.com;meta.com", + "email": "mila.quebec;meta.com;google.com;meta.com;meta.com;meta.com;meta.com", + "github": "https://github.com/DevSinghSachan/unsupervised-passage-reranking", + "project": "", + "author_num": 7, + 
"aff_unique_index": "0+1;2;3;2;2;0+1+2;4", + "aff_unique_norm": "McGill University;Quebec AI Institute;Meta Platforms, Inc.;Google;University of Washington", + "aff_unique_dep": ";AI Institute;Meta AI Research;Google Research;", + "aff_unique_url": "https://www.mcgill.ca;https://mila.quebec;https://meta.com;https://research.google;https://www.washington.edu", + "aff_unique_abbr": "McGill;Mila;Meta AI;Google Research;UW", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0+0;1;1;1;1;0+0+1;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-industry.38", + "title": "Improving Precancerous Case Characterization via Transformer-based Ensemble Learning", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "The application of natural language processing (NLP) to cancer pathology reports has been focused on detecting cancer cases, largely ignoring precancerous cases. Improving the characterization of precancerous adenomas assists in developing diagnostic tests for early cancer detection and prevention, especially for colorectal cancer (CRC). Here we developed transformer-based deep neural network NLP models to perform the CRC phenotyping, with the goal of extracting precancerous lesion attributes and distinguishing cancer and precancerous cases. We achieved 0.914 macro-F1 scores for classifying patients into negative, non-advanced adenoma, advanced adenoma and CRC. We further improved the performance to 0.923 using an ensemble of classifiers for cancer status classification and lesion size named-entity recognition (NER). 
Our results demonstrated the potential of using NLP to leverage real-world health record data to facilitate the development of diagnostic tests for early cancer prevention.", + "author": "Yizhen Zhong; Jiajie Xiao; Thomas Vetterli; Mahan Matin; Ellen Loo; Jimmy Lin; Richard Bourgon; Ofer Shapira", + "authorids": "/y/yizhen-zhong/; /j/jiajie-xiao/; /t/thomas-vetterli/; /m/mahan-matin/; /e/ellen-loo/; /j/jimmy-lin/; /r/richard-bourgon/; /o/ofer-shapira/", + "bibtex": "@inproceedings{zhong-etal-2022-improving-precancerous,\n title = \"Improving Precancerous Case Characterization via Transformer-based Ensemble Learning\",\n author = \"Zhong, Yizhen and\n Xiao, Jiajie and\n Vetterli, Thomas and\n Matin, Mahan and\n Loo, Ellen and\n Lin, Jimmy and\n Bourgon, Richard and\n Shapira, Ofer\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.38/\",\n doi = \"10.18653/v1/2022.emnlp-industry.38\",\n pages = \"379--389\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.38.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.38/", + "pdf_size": 918302, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9574407499093726099&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA; Freenome, South San Francisco, CA", + "aff_domain": "freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com", + "email": 
"freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com;freenome.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Freenome", + "aff_unique_dep": "", + "aff_unique_url": "https://www.freenome.com", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "South San Francisco", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.536", + "title": "Improving Scheduled Sampling with Elastic Weight Consolidation for Neural Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite strong performance in many sequence-to-sequence tasks, autoregressive models trained with maximum likelihood estimation suffer from exposure bias, i.e. the discrepancy between the ground-truth prefixes used during training and the model-generated prefixes used at inference time. Scheduled sampling is a simple and empirically successful approach which addresses this issue by incorporating model-generated prefixes into training. However, it has been argued that it is an inconsistent training objective leading to models ignoring the prefixes altogether. In this paper, we conduct systematic experiments and find that scheduled sampling, while it ameliorates exposure bias by increasing model reliance on the input sequence, worsens performance when the prefix at inference time is correct, a form of catastrophic forgetting. We propose to use Elastic Weight Consolidation to better balance mitigating exposure bias with retaining performance. 
Experiments on four IWSLT\u201914 and WMT\u201914 translation datasets demonstrate that our approach alleviates catastrophic forgetting and significantly outperforms maximum likelihood estimation and scheduled sampling baselines.", + "author": "Michalis Korakakis; Andreas Vlachos", + "authorids": "/m/michalis-korakakis/; /a/andreas-vlachos/", + "bibtex": "@inproceedings{korakakis-vlachos-2022-improving,\n title = \"Improving Scheduled Sampling with Elastic Weight Consolidation for Neural Machine Translation\",\n author = \"Korakakis, Michalis and\n Vlachos, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.536/\",\n doi = \"10.18653/v1/2022.findings-emnlp.536\",\n pages = \"7247--7258\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.536.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.536/", + "pdf_size": 324121, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7446812787800303270&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Cambridge; University of Cambridge", + "aff_domain": "cam.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;cam.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.4", + "title": "Improving Semantic Matching through Dependency-Enhanced Pre-trained Model with Adaptive 
Fusion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer-based pre-trained models like BERT have achieved great progress on Semantic Sentence Matching. Meanwhile, dependency prior knowledge has also shown general benefits in multiple NLP tasks. However, how to efficiently integrate dependency prior structure into pre-trained models to better model complex semantic matching relations is still unsettled. In this paper, we propose the Dependency-Enhanced Adaptive Fusion Attention (DAFA), which explicitly introduces dependency structure into pre-trained models and adaptively fuses it with semantic information. Specifically, (i) DAFA first proposes a structure-sensitive paradigm to construct a dependency matrix for calibrating attention weights. (ii) It adopts an adaptive fusion module to integrate the obtained dependency information and the original semantic signals. Moreover, DAFA reconstructs the attention calculation flow and provides better interpretability. 
By applying it on BERT, our method achieves state-of-the-art or competitive performance on 10 public datasets, demonstrating the benefits of adaptively fusing dependency structure in semantic matching task.", + "author": "Jian Song; Di Liang; Rumei Li; Yuntao Li; Sirui Wang; Minlong Peng; Wei Wu; Yongxin Yu", + "authorids": "/j/jian-song/; /d/di-liang/; /r/rumei-li/; /y/yuntao-li/; /s/sirui-wang/; /m/minlong-peng/; /w/wei-wu/; /y/yongxin-yu/", + "bibtex": "@inproceedings{song-etal-2022-improving-semantic,\n title = \"Improving Semantic Matching through Dependency-Enhanced Pre-trained Model with Adaptive Fusion\",\n author = \"Song, Jian and\n Liang, Di and\n Li, Rumei and\n Li, Yuntao and\n Wang, Sirui and\n Peng, Minlong and\n Wu, Wei and\n Yu, Yongxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.4/\",\n doi = \"10.18653/v1/2022.findings-emnlp.4\",\n pages = \"45--57\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.4.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.4/", + "pdf_size": 2578886, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1394349080883876267&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Tianjin University, Tianjin, China; Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Fudan University, Shanghai, China; Meituan Inc., Beijing, China; Tianjin University, Tianjin, China", + "aff_domain": "tju.edu.cn;meituan.com;meituan.com;meituan.com;meituan.com;fudan.edu.cn;meituan.com;tju.edu.cn", + "email": 
"tju.edu.cn;meituan.com;meituan.com;meituan.com;meituan.com;fudan.edu.cn;meituan.com;tju.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;1;1;2;1;0", + "aff_unique_norm": "Tianjin University;Meituan Inc.;Fudan University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.tju.edu.cn;https://www.meituan.com;https://www.fudan.edu.cn", + "aff_unique_abbr": "Tianjin U;Meituan;Fudan", + "aff_campus_unique_index": "0;1;1;1;1;2;1;0", + "aff_campus_unique": "Tianjin;Beijing;Shanghai", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.300", + "title": "Improving Sharpness-Aware Minimization with Fisher Mask for Better Generalization on Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Fine-tuning large pretrained language models on a limited training corpus usually suffers from poor generalization. Prior works show that the recently-proposed sharpness-aware minimization (SAM) optimization method can improve the model generalization. However, SAM adds a perturbation to each model parameter equally (but not all parameters contribute equally to the optimization of training), which we argue is sub-optimal and will lead to excessive computation. In this paper, we propose a novel optimization procedure, namely FSAM, which introduces a Fisher mask to improve the efficiency and performance of SAM. In short, instead of adding perturbation to all parameters, FSAM uses the Fisher information to identity the important parameters and formulates a Fisher mask to obtain the sparse perturbation, i.e., making the optimizer focus on these important parameters. Experiments on various tasks in GLUE and SuperGLUE benchmarks show that FSAM consistently outperforms the vanilla SAM by 0.67 1.98 average score among four different pretrained models. 
We also empirically show that FSAM works well in other complex scenarios, e.g., fine-tuning on generation tasks or limited training data. Encouragingly, when training data is limited, FSAM improves the SAM by a large margin, i.e., up to 15.1.", + "author": "Qihuang Zhong; Liang Ding; Li Shen; Peng Mi; Juhua Liu; Bo Du; Dacheng Tao", + "authorids": "/q/qihuang-zhong/; /l/liang-ding/; /l/li-shen/; /p/peng-mi/; /j/juhua-liu/; /b/bo-du/; /d/dacheng-tao/", + "bibtex": "@inproceedings{zhong-etal-2022-improving,\n title = \"Improving Sharpness-Aware Minimization with Fisher Mask for Better Generalization on Language Models\",\n author = \"Zhong, Qihuang and\n Ding, Liang and\n Shen, Li and\n Mi, Peng and\n Liu, Juhua and\n Du, Bo and\n Tao, Dacheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.300/\",\n doi = \"10.18653/v1/2022.findings-emnlp.300\",\n pages = \"4064--4085\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.300.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.300/", + "pdf_size": 2525918, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11657949704428007601&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China+JD Explore Academy, China; JD Explore Academy, China; School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; Research Center for Graphic Communication, Printing and Packaging, and Institute of 
Artificial Intelligence, Wuhan University, China+JD Explore Academy, China; National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science and Hubei Key Laboratory of Multimedia and Network Communication Engineering, Wuhan University, China+JD Explore Academy, China; JD Explore Academy, China", + "aff_domain": "whu.edu.cn;jd.com;jd.com;stu.xmu.edu.cn;whu.edu.cn;whu.edu.cn;gmail.com", + "email": "whu.edu.cn;jd.com;jd.com;stu.xmu.edu.cn;whu.edu.cn;whu.edu.cn;gmail.com", + "github": "https://github.com/WHU-ZQH/FSAM4PLM", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;2;2;0+1;0+1;1", + "aff_unique_norm": "Wuhan University;JD Explore Academy;Xiamen University", + "aff_unique_dep": "School of Computer Science;;School of Informatics", + "aff_unique_url": "http://www.whu.edu.cn;;https://www.xmu.edu.cn", + "aff_unique_abbr": "WHU;;XMU", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0+0;0;0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.322", + "title": "Improving Stability of Fine-Tuning Pretrained Language Models via Component-Wise Gradient Norm Clipping", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fine-tuning over large pretrained language models (PLMs) has established many state-of-the-art results. Despite its superior performance, such fine-tuning can be unstable, resulting in significant variance in performance and potential risks for practical applications. Previous works have attributed such instability to the catastrophic forgetting problem in the top layers of PLMs, which indicates iteratively fine-tuning layers in a top-down manner is a promising solution. In this paper, we first point out that this method does not always work out due to the different convergence speeds of different layers/modules. 
Inspired by this observation, we propose a simple component-wise gradient norm clipping method to adjust the convergence speed for different components. Experiment results demonstrate that our method achieves consistent improvements in terms of generalization performance, convergence speed, and training stability. The codebase can be found at https://github.com/yangalan123/FineTuningStability.", + "author": "Chenghao Yang; Xuezhe Ma", + "authorids": "/c/chenghao-yang/; /x/xuezhe-ma/", + "bibtex": "@inproceedings{yang-ma-2022-improving,\n title = \"Improving Stability of Fine-Tuning Pretrained Language Models via Component-Wise Gradient Norm Clipping\",\n author = \"Yang, Chenghao and\n Ma, Xuezhe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.322/\",\n doi = \"10.18653/v1/2022.emnlp-main.322\",\n pages = \"4854--4859\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.322.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.322/", + "pdf_size": 257004, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11133772007331181821&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of Chicago; University of Southern California", + "aff_domain": "gmail.com;isi.edu", + "email": "gmail.com;isi.edu", + "github": "https://github.com/yangalan123/FineTuningStability", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Chicago;University of Southern California", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uchicago.edu;https://www.usc.edu", + "aff_unique_abbr": "UChicago;USC", + "aff_campus_unique_index": "1", + 
"aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.428", + "title": "Improving Temporal Generalization of Pre-trained Language Models with Lexical Semantic Change", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent research has revealed that neural language models at scale suffer from poor temporal generalization capability, i.e., language model pre-trained on static data from past years performs worse over time on emerging data. Existing methods mainly perform continual training to mitigate such a misalignment. While effective to some extent but is far from being addressed on both the language modeling and downstream tasks. In this paper, we empirically observe that temporal generalization is closely affiliated with lexical semantic change, which is one of the essential phenomena of natural languages. Based on this observation, we propose a simple yet effective lexical-level masking strategy to post-train a converged language model. Experiments on two pre-trained language models, two different classification tasks, and four benchmark datasets demonstrate the effectiveness of our proposed method over existing temporal adaptation methods, i.e., continual training with new data. 
Our code is available at https://github.com/zhaochen0110/LMLM.", + "author": "Zhaochen Su; Zecheng Tang; Xinyan Guan; Lijun Wu; Min Zhang; Juntao Li", + "authorids": "/z/zhaochen-su/; /z/zecheng-tang/; /x/xinyan-guan/; /l/lijun-wu/; /m/min-zhang/; /j/juntao-li/", + "bibtex": "@inproceedings{su-etal-2022-improving,\n title = \"Improving Temporal Generalization of Pre-trained Language Models with Lexical Semantic Change\",\n author = \"Su, Zhaochen and\n Tang, Zecheng and\n Guan, Xinyan and\n Wu, Lijun and\n Zhang, Min and\n Li, Juntao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.428/\",\n doi = \"10.18653/v1/2022.emnlp-main.428\",\n pages = \"6380--6393\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.428.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.428/", + "pdf_size": 840122, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3148751900437544494&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Institute of Computer Science and Technology, Soochow University, China; Institute of Computer Science and Technology, Soochow University, China; Institute of Computer Science and Technology, Soochow University, China; Institute of Computer Science and Technology, Soochow University, China; Microsoft Research Asia; Institute of Computer Science and Technology, Soochow University, China", + "aff_domain": "gmail.com;gmail.com;gmail.com;suda.edu.cn;microsoft.com;suda.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;suda.edu.cn;microsoft.com;suda.edu.cn", + "github": "https://github.com/zhaochen0110/LMLM", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", 
+ "aff_unique_norm": "Soochow University;Microsoft Research", + "aff_unique_dep": "Institute of Computer Science and Technology;Research", + "aff_unique_url": "https://eng.suda.edu.cn/;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": ";MSR Asia", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.31", + "title": "Improving Text-to-SQL Semantic Parsing with Fine-grained Query Understanding", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Most recent research on Text-to-SQL semantic parsing relies on either parser itself or simple heuristic based approach to understand natural language query (NLQ). When synthesizing a SQL query, there is no explicit semantic information of NLQ available to the parser which leads to undesirable generalization performance. In addition, without lexical-level fine-grained query understanding, linking between query and database can only rely on fuzzy string match which leads to suboptimal performance in real applications. In view of this, in this paper we present a general-purpose, modular neural semantic parsing framework that is based on token-level fine-grained query understanding. Our framework consists of three modules: named entity recognizer (NER), neural entity linker (NEL) and neural semantic parser (NSP). By jointly modeling query and database, NER model analyzes user intents and identifies entities in the query. NEL model links typed entities to schema and cell values in database. Parser model leverages available semantic information and linking results and synthesizes tree-structured SQL queries based on dynamically generated grammar. 
Experiments on SQUALL, a newly released semantic parsing dataset, show that we can achieve 56.8% execution accuracy on WikiTableQuestions (WTQ) test set, which outperforms the state-of-the-art model by 2.7%.", + "author": "Jun Wang; Patrick Ng; Alexander Hanbo Li; Jiarong Jiang; Zhiguo Wang; Bing Xiang; Ramesh Nallapati; Sudipta Sengupta", + "authorids": "/j/jun-wang/; /p/patrick-ng/; /a/alexander-hanbo-li/; /j/jiarong-jiang/; /z/zhiguo-wang/; /b/bing-xiang/; /r/ramesh-nallapati/; /s/sudipta-sengupta/", + "bibtex": "@inproceedings{wang-etal-2022-improving-text,\n title = \"Improving Text-to-{SQL} Semantic Parsing with Fine-grained Query Understanding\",\n author = \"Wang, Jun and\n Ng, Patrick and\n Li, Alexander Hanbo and\n Jiang, Jiarong and\n Wang, Zhiguo and\n Xiang, Bing and\n Nallapati, Ramesh and\n Sengupta, Sudipta\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.31/\",\n doi = \"10.18653/v1/2022.emnlp-industry.31\",\n pages = \"306--312\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.31.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.31/", + "pdf_size": 374888, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2050217385682849868&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs; Amazon AWS AI Labs", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", 
+ "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "AWS AI Labs", + "aff_unique_url": "https://aws.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.485", + "title": "Improving Zero-Shot Multilingual Translation with Universal Representations and Cross-Mapping", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The many-to-many multilingual neural machine translation can translate between language pairs unseen during training, i.e., zero-shot translation. Improving zero-shot translation requires the model to learn universal representations and cross-mapping relationships to transfer the knowledge learned on the supervised directions to the zero-shot directions. In this work, we propose the state mover\u2019s distance based on the optimal theory to model the difference of the representations output by the encoder. Then, we bridge the gap between the semantic-equivalent representations of different languages at the token level by minimizing the proposed distance to learn universal representations. Besides, we propose an agreement-based training scheme, which can help the model make consistent predictions based on the semantic-equivalent sentences to learn universal cross-mapping relationships for all translation directions. The experimental results on diverse multilingual datasets show that our method can improve consistently compared with the baseline system and other contrast methods. 
The analysis proves that our method can better align the semantic space and improve the prediction consistency.", + "author": "Shuhao Gu; Yang Feng", + "authorids": "/s/shuhao-gu/; /y/yang-feng/", + "bibtex": "@inproceedings{gu-feng-2022-improving,\n title = \"Improving Zero-Shot Multilingual Translation with Universal Representations and Cross-Mapping\",\n author = \"Gu, Shuhao and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.485/\",\n doi = \"10.18653/v1/2022.findings-emnlp.485\",\n pages = \"6492--6504\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.485.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.485/", + "pdf_size": 7682813, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18216188592980696371&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences", + "aff_domain": "ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn", + "github": "https://github.com/ictnlp/Zero-MNMT", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.125", + "title": "Improving compositional generalization for multi-step quantitative reasoning in question answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Quantitative reasoning is an important aspect of question answering, especially when numeric and verbal cues interact to indicate sophisticated, multi-step programs. In this paper, we demonstrate how modeling the compositional nature of quantitative text can enhance the performance and robustness of QA models, allowing them to capture arithmetic logic that is expressed verbally. Borrowing from the literature on semantic parsing, we propose a method that encourages the QA models to adjust their attention patterns and capture input/output alignments that are meaningful to the reasoning task. We show how this strategy improves program accuracy and renders the models more robust against overfitting as the number of reasoning steps grows. Our approach is designed as a standalone module which can be prepended to many existing models and trained in an end-to-end fashion without the need for additional supervisory signal. 
As part of this exercise, we also create a unified dataset building on four previously released numerical QA datasets over tabular data.", + "author": "Armineh Nourbakhsh; Cathy Jiao; Sameena Shah; Carolyn Ros\u00e9", + "authorids": "/a/armineh-nourbakhsh/; /c/cathy-jiao/; /s/sameena-shah/; /c/carolyn-rose/", + "bibtex": "@inproceedings{nourbakhsh-etal-2022-improving,\n title = \"Improving compositional generalization for multi-step quantitative reasoning in question answering\",\n author = \"Nourbakhsh, Armineh and\n Jiao, Cathy and\n Shah, Sameena and\n Ros{\\'e}, Carolyn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.125/\",\n doi = \"10.18653/v1/2022.emnlp-main.125\",\n pages = \"1916--1932\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.125.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.125/", + "pdf_size": 2594821, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11472000150730070906&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Language Technologies Institute, Carnegie Mellon University + J.P. Morgan AI Research; Language Technologies Institute, Carnegie Mellon University; J.P. Morgan AI Research; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu; ; ; ", + "email": "cs.cmu.edu; ; ; ", + "github": "https://github.com/ArmiNouri/CompAQT", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;0", + "aff_unique_norm": "Carnegie Mellon University;J.P. 
Morgan", + "aff_unique_dep": "Language Technologies Institute;AI Research", + "aff_unique_url": "https://www.cmu.edu;https://www.jpmorgan.com", + "aff_unique_abbr": "CMU;JPM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh;", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.105", + "title": "Improving the Extraction of Supertags for Constituency Parsing with Linear Context-Free Rewriting Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In parsing phrase structures, supertagging achieves a symbiosis between the interpretability of formal grammars and the accuracy and speed of more recent neural models.The approach was only recently transferred to parsing discontinuous constituency structures with linear context-free rewriting systems (LCFRS).We reformulate and parameterize the previously fixed extraction process for LCFRS supertags with the aim to improve the overall parsing quality.These parameters are set in the context of several steps in the extraction process and are used to control the granularity of extracted grammar rules as well as the association of lexical symbols with each supertag.We evaluate the influence of the parameters on the sets of extracted supertags and the parsing quality using three treebanks in the English and German language, and we compare the best-performing configurations to recent state-of-the-art parsers in the area.Our results show that some of our configurations and the slightly modified parsing process improve the quality and speed of parsing with our supertags over the previous approach.Moreover, we achieve parsing scores that either surpass or are among the state-of-the-art in discontinuous constituent parsing.", + "author": "Thomas Ruprecht", + "authorids": "/t/thomas-ruprecht/", + "bibtex": "@inproceedings{ruprecht-2022-improving,\n title = \"Improving the Extraction of Supertags for Constituency 
Parsing with Linear Context-Free Rewriting Systems\",\n author = \"Ruprecht, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.105/\",\n doi = \"10.18653/v1/2022.findings-emnlp.105\",\n pages = \"1466--1477\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.105.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.105/", + "pdf_size": 302027, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:dZuXsSXRmtYJ:scholar.google.com/&scioq=Improving+the+Extraction+of+Supertags+for+Constituency+Parsing+with+Linear+Context-Free+Rewriting+Systems&hl=en&as_sdt=0,48", + "gs_version_total": 0, + "aff": "Faculty of Computer Science, Technische Universit\u00e4t Dresden", + "aff_domain": "tu-dresden.de", + "email": "tu-dresden.de", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Technische Universit\u00e4t Dresden", + "aff_unique_dep": "Faculty of Computer Science", + "aff_unique_url": "https://tu-dresden.de", + "aff_unique_abbr": "TUD", + "aff_country_unique_index": "0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.319", + "title": "Improving the Factual Correctness of Radiology Report Generation with Semantic Rewards", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural image-to-text radiology report generation systems offer the potential to improve radiology reporting by reducing the repetitive process of report drafting and identifying possible medical errors. These systems have achieved promising performance as measured by widely used NLG metrics such as BLEU and CIDEr. 
However, the current systems face important limitations. First, they present an increased complexity in architecture that offers only marginal improvements on NLG metrics. Secondly, these systems that achieve high performance on these metrics are not always factually complete or consistent due to both inadequate training and evaluation. Recent studies have shown the systems can be substantially improved by using new methods encouraging 1) the generation of domain entities consistent with the reference and 2) describing these entities in inferentially consistent ways. So far, these methods rely on weakly-supervised approaches (rule-based) and named entity recognition systems that are not specific to the chest X-ray domain. To overcome this limitation, we propose a new method, the RadGraph reward, to further improve the factual completeness and correctness of generated radiology reports. More precisely, we leverage the RadGraph dataset containing annotated chest X-ray reports with entities and relations between entities. 
On two open radiology report datasets, our system substantially improves the scores up to 14.2% and 25.3% on metrics evaluating the factual correctness and completeness of reports.", + "author": "Jean-Benoit Delbrouck; Pierre Chambon; Christian Bluethgen; Emily Tsai; Omar Almusa; Curtis Langlotz", + "authorids": "/j/jean-benoit-delbrouck/; /p/pierre-chambon/; /c/christian-bluethgen/; /e/emily-tsai/; /o/omar-almusa/; /c/curtis-langlotz/", + "bibtex": "@inproceedings{delbrouck-etal-2022-improving,\n title = \"Improving the Factual Correctness of Radiology Report Generation with Semantic Rewards\",\n author = \"Delbrouck, Jean-Benoit and\n Chambon, Pierre and\n Bluethgen, Christian and\n Tsai, Emily and\n Almusa, Omar and\n Langlotz, Curtis\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.319/\",\n doi = \"10.18653/v1/2022.findings-emnlp.319\",\n pages = \"4348--4360\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.319.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.319/", + "pdf_size": 1081478, + "gs_citation": 73, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14943173254494922513&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Stanford University; Stanford University; Stanford University; Stanford University; Stanford University; Stanford University", + "aff_domain": "stanford.edu; ; ; ; ; ", + "email": "stanford.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": 
"0;0;0;0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.258", + "title": "Improving the Sample Efficiency of Prompt Tuning with Domain Adaptation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompt tuning, or the conditioning of a frozen pretrained language model (PLM) with soft prompts learned from data, has demonstrated impressive performance on a wide range of NLP tasks. However, prompt tuning requires a large training dataset to be effective and is outperformed by finetuning the entire PLM in data-scarce regimes. Previous work (Gu et al., 2022, Vu et al., 2022) proposed to transfer soft prompts pretrained on the source domain to the target domain. In this paper, we explore domain adaptation for prompt tuning, a problem setting where unlabeled data from the target domain are available during pretraining. We propose bOosting Prompt TunIng with doMain Adaptation (OPTIMA), which regularizes the decision boundary to be smooth around regions where source and target data distributions are similar. Extensive experiments demonstrate that OPTIMA significantly enhances the transferability and sample-efficiency of prompt tuning compared to strong baselines. 
Moreover, in few-shot settings, OPTIMA exceeds full-model tuning by a large margin.", + "author": "Xu Guo; Boyang Li; Han Yu", + "authorids": "/x/xu-guo/; /b/boyang-li/; /h/han-yu/", + "bibtex": "@inproceedings{guo-etal-2022-improving,\n title = \"Improving the Sample Efficiency of Prompt Tuning with Domain Adaptation\",\n author = \"Guo, Xu and\n Li, Boyang and\n Yu, Han\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.258/\",\n doi = \"10.18653/v1/2022.findings-emnlp.258\",\n pages = \"3523--3537\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.258.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.258/", + "pdf_size": 500055, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14918360719871742624&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.193", + "title": "In-Context Learning for Few-Shot Dialogue State Tracking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Collecting and annotating task-oriented dialogues is time-consuming and costly. Thus, zero and few shot learning for dialogue tasks presents an exciting opportunity. In this work, we propose an in-context (IC) learning framework for zero-shot and few-shot learning dialogue state tracking (DST), where a large pretrained language model (LM) takes a test instance and a few exemplars as input, and directly decodes the dialogue state without any parameter updates. 
This approach is more flexible and scalable than prior DST work when adapting to new domains and scenarios. To better leverage a tabular domain description in the LM prompt, we reformulate DST into a text-to-SQL problem. We also propose a novel approach to retrieve annotated dialogues as exemplars. Empirical results on MultiWOZ show that our method IC-DST substantially outperforms previous fine-tuned state-of-the-art models in few-shot settings. In addition, we test IC-DST in zero-shot settings, in which the model only takes a fixed task instruction as input, finding that it outperforms previous zero-shot methods by a large margin.", + "author": "Yushi Hu; Chia-Hsuan Lee; Tianbao Xie; Tao Yu; Noah A. Smith; Mari Ostendorf", + "authorids": "/y/yushi-hu/; /c/chia-hsuan-lee/; /t/tianbao-xie/; /t/tao-yu/; /n/noah-a-smith/; /m/mari-ostendorf/", + "bibtex": "@inproceedings{hu-etal-2022-context,\n title = \"In-Context Learning for Few-Shot Dialogue State Tracking\",\n author = \"Hu, Yushi and\n Lee, Chia-Hsuan and\n Xie, Tianbao and\n Yu, Tao and\n Smith, Noah A. 
and\n Ostendorf, Mari\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.193/\",\n doi = \"10.18653/v1/2022.findings-emnlp.193\",\n pages = \"2627--2643\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.193.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.193/", + "pdf_size": 912209, + "gs_citation": 129, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15413760495942707422&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Washington; University of Washington; University of Hong Kong; University of Washington; University of Washington; Allen Institute for Artificial Intelligence", + "aff_domain": "washington.edu;washington.edu; ;washington.edu;allenai.org;washington.edu", + "email": "washington.edu;washington.edu; ;washington.edu;allenai.org;washington.edu", + "github": "https://github.com/Yushi-Hu/IC-DST", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;2", + "aff_unique_norm": "University of Washington;University of Hong Kong;Allen Institute for Artificial Intelligence", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.washington.edu;https://www.hku.hk;https://allenai.org", + "aff_unique_abbr": "UW;HKU;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.614", + "title": "Incorporating Relevance Feedback for Information-Seeking Retrieval using Few-Shot Document Re-Ranking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pairing a lexical retriever with a neural re-ranking model has set 
state-of-the-art performance on large-scale information retrieval datasets. This pipeline covers scenarios like question answering or navigational queries, however, for information-seeking scenarios, users often provide information on whether a document is relevant to their query in form of clicks or explicit feedback. Therefore, in this work, we explore how relevance feedback can be directly integrated into neural re-ranking models by adopting few-shot and parameter-efficient learning techniques. Specifically, we introduce a kNN approach that re-ranks documents based on their similarity with the query and the documents the user considers relevant. Further, we explore Cross-Encoder models that we pre-train using meta-learning and subsequently fine-tune for each query, training only on the feedback documents. To evaluate our different integration strategies, we transform four existing information retrieval datasets into the relevance feedback scenario. Extensive experiments demonstrate that integrating relevance feedback directly in neural re-ranking models improves their performance, and fusing lexical ranking with our best performing neural re-ranker outperforms all other methods by 5.2% nDCG@20.", + "author": "Tim Baumg\u00e4rtner; Leonardo F. R. Ribeiro; Nils Reimers; Iryna Gurevych", + "authorids": "/t/tim-baumgartner/; /l/leonardo-f-r-ribeiro/; /n/nils-reimers/; /i/iryna-gurevych/", + "bibtex": "@inproceedings{baumgartner-etal-2022-incorporating,\n title = \"Incorporating Relevance Feedback for Information-Seeking Retrieval using Few-Shot Document Re-Ranking\",\n author = {Baumg{\\\"a}rtner, Tim and\n Ribeiro, Leonardo F. R. 
and\n Reimers, Nils and\n Gurevych, Iryna},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.614/\",\n doi = \"10.18653/v1/2022.emnlp-main.614\",\n pages = \"8988--9005\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.614.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.614/", + "pdf_size": 501712, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13711109957100554149&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Ubiquitous Knowledge Processing Lab (UKP Lab), Department of Computer Science and Hessian Center for AI (hessian.AI), Technical University of Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab), Department of Computer Science and Hessian Center for AI (hessian.AI), Technical University of Darmstadt + Amazon Alexa AI; cohere.ai; Ubiquitous Knowledge Processing Lab (UKP Lab), Department of Computer Science and Hessian Center for AI (hessian.AI), Technical University of Darmstadt", + "aff_domain": "; ; ; ", + "email": "; ; ; ", + "github": "https://github.com/UKPLab/incorporating-relevance", + "project": "www.ukp.tu-darmstadt.de", + "author_num": 4, + "aff_unique_index": "0;0+1;2;0", + "aff_unique_norm": "Technical University of Darmstadt;Amazon;Cohere AI", + "aff_unique_dep": "Department of Computer Science;Alexa AI;", + "aff_unique_url": "https://www.tu-darmstadt.de;https://www.amazon.com;https://www.cohere.ai", + "aff_unique_abbr": "TU Darmstadt;Amazon;Cohere", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;2;0", + "aff_country_unique": "Germany;United States;Canada" + }, + { + "id": "2022.emnlp-main.453", + 
"title": "Increasing Visual Awareness in Multimodal Neural Machine Translation from an Information Theoretic Perspective", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal machine translation (MMT) aims to improve translation quality by equipping the source sentence with its corresponding image. Despite the promising performance, MMT models still suffer the problem of input degradation: models focus more on textual information while visual information is generally overlooked. In this paper, we endeavor to improve MMT performance by increasing visual awareness from an information theoretic perspective. In detail, we decompose the informative visual signals into two parts: source-specific information and target-specific information. We use mutual information to quantify them and propose two methods for objective optimization to better leverage visual signals. Experiments on two datasets demonstrate that our approach can effectively enhance the visual awareness of MMT model and achieve superior results against strong baselines.", + "author": "Baijun Ji; Tong Zhang; Yicheng Zou; Bojie Hu; Si Shen", + "authorids": "/b/baijun-ji/; /t/tong-zhang/; /y/yicheng-zou/; /b/bojie-hu/; /s/si-shen/", + "bibtex": "@inproceedings{ji-etal-2022-increasing,\n title = \"Increasing Visual Awareness in Multimodal Neural Machine Translation from an Information Theoretic Perspective\",\n author = \"Ji, Baijun and\n Zhang, Tong and\n Zou, Yicheng and\n Hu, Bojie and\n Shen, Si\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.453/\",\n doi = \"10.18653/v1/2022.emnlp-main.453\",\n pages = \"6755--6764\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.453.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.453/", + "pdf_size": 626548, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1668099245857782002&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Tencent Minority-Mandarin Translation, Beijing, China; Tencent Minority-Mandarin Translation, Beijing, China; School of Computer Science, Fudan University; Tencent Minority-Mandarin Translation, Beijing, China; Research Base on Interdisciplinary Terminology and Translation, Nanjing University", + "aff_domain": "tencent.com;tencent.com;fudan.edu.cn;tencent.com; ", + "email": "tencent.com;tencent.com;fudan.edu.cn;tencent.com; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;2", + "aff_unique_norm": "Tencent;Fudan University;Nanjing University", + "aff_unique_dep": "Minority-Mandarin Translation;School of Computer Science;Research Base on Interdisciplinary Terminology and Translation", + "aff_unique_url": "https://www.tencent.com;https://www.fudan.edu.cn;http://www.nju.edu.cn", + "aff_unique_abbr": "Tencent;Fudan;Nanjing U", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.360", + "title": "IndicNLG Benchmark: Multilingual Datasets for Diverse NLG Tasks in Indic Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Natural Language Generation (NLG) for non-English languages is hampered by the scarcity of datasets in these languages. We present the IndicNLG Benchmark, a collection of datasets for benchmarking NLG for 11 Indic languages. We focus on five diverse tasks, namely, biography generation using Wikipedia infoboxes, news headline generation, sentence summarization, paraphrase generation and, question generation. 
We describe the created datasets and use them to benchmark the performance of several monolingual and multilingual baselines that leverage pre-trained sequence-to-sequence models. Our results exhibit the strong performance of multilingual language-specific pre-trained models, and the utility of models trained on our dataset for other related NLG tasks. Our dataset creation methods can be easily applied to modest-resource languages as they involve simple steps such as scraping news articles and Wikipedia infoboxes, light cleaning, and pivoting through machine translation data. To the best of our knowledge, the IndicNLG Benchmark is the first NLG benchmark for Indic languages and the most diverse multilingual NLG dataset, with approximately 8M examples across 5 tasks and 11 languages. The datasets and models will be publicly available.", + "author": "Aman Kumar; Himani Shrotriya; Prachi Sahu; Amogh Mishra; Raj Dabre; Ratish Puduppully; Anoop Kunchukuttan; Mitesh M. Khapra; Pratyush Kumar", + "authorids": "/a/aman-kumar/; /h/himani-shrotriya/; /p/prachi-sahu/; /a/amogh-mishra/; /r/raj-dabre/; /r/ratish-puduppully/; /a/anoop-kunchukuttan/; /m/mitesh-m-khapra/; /p/pratyush-kumar/", + "bibtex": "@inproceedings{kumar-etal-2022-indicnlg,\n title = \"{I}ndic{NLG} Benchmark: Multilingual Datasets for Diverse {NLG} Tasks in {I}ndic Languages\",\n author = \"Kumar, Aman and\n Shrotriya, Himani and\n Sahu, Prachi and\n Mishra, Amogh and\n Dabre, Raj and\n Puduppully, Ratish and\n Kunchukuttan, Anoop and\n Khapra, Mitesh M. 
and\n Kumar, Pratyush\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.360/\",\n doi = \"10.18653/v1/2022.emnlp-main.360\",\n pages = \"5363--5394\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.360.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.360/", + "pdf_size": 427073, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7805241910275660940&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "IIT Madras; IIT Madras; IIT Madras; National Institute of Information and Communications Technology; University of Edinburgh; IIT Madras + Microsoft + AI4Bharat; Columbia University; IIT Madras + AI4Bharat; IIT Madras + Microsoft + AI4Bharat", + "aff_domain": "smail.iitm.ac.in;smail.iitm.ac.in;smail.iitm.ac.in;nict.go.jp;sms.ed.ac.uk;microsoft.com;columbia.edu;cse.iitm.ac.in;microsoft.com", + "email": "smail.iitm.ac.in;smail.iitm.ac.in;smail.iitm.ac.in;nict.go.jp;sms.ed.ac.uk;microsoft.com;columbia.edu;cse.iitm.ac.in;microsoft.com", + "github": "", + "project": "https://ai4bharat.iitm.ac.in/indicnlg-suite", + "author_num": 9, + "aff_unique_index": "0;0;0;1;2;0+3+4;5;0+4;0+3+4", + "aff_unique_norm": "Indian Institute of Technology Madras;National Institute of Information and Communications Technology;University of Edinburgh;Microsoft Corporation;AI4Bharat;Columbia University", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.iitm.ac.in;https://www.nict.go.jp/;https://www.ed.ac.uk;https://www.microsoft.com;;https://www.columbia.edu", + "aff_unique_abbr": "IITM;NICT;Edinburgh;Microsoft;;Columbia", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Madras;", 
+ "aff_country_unique_index": "0;0;0;1;2;0+3+0;3;0+0;0+3+0", + "aff_country_unique": "India;Japan;United Kingdom;United States" + }, + { + "id": "2022.emnlp-main.755", + "title": "IndicXNLI: Evaluating Multilingual Inference for Indian Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While Indic NLP has made rapid advances recently in terms of the availability of corpora and pre-trained models, benchmark datasets on standard NLU tasks are limited. To this end, we introduce INDICXNLI, an NLI dataset for 11 Indic languages. It has been created by high-quality machine translation of the original English XNLI dataset and our analysis attests to the quality of INDICXNLI. By finetuning different pre-trained LMs on this INDICXNLI, we analyze various cross-lingual transfer techniques with respect to the impact of the choice of language models, languages, multi-linguality, mix-language input, etc. These experiments provide us with useful insights into the behaviour of pre-trained models for a diverse set of languages.", + "author": "Divyanshu Aggarwal; Vivek Gupta; Anoop Kunchukuttan", + "authorids": "/d/divyanshu-aggarwal/; /v/vivek-gupta/; /a/anoop-kunchukuttan/", + "bibtex": "@inproceedings{aggarwal-etal-2022-indicxnli,\n title = \"{I}ndic{XNLI}: Evaluating Multilingual Inference for {I}ndian Languages\",\n author = \"Aggarwal, Divyanshu and\n Gupta, Vivek and\n Kunchukuttan, Anoop\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.755/\",\n doi = \"10.18653/v1/2022.emnlp-main.755\",\n pages = \"10994--11006\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.755.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.755/", + "pdf_size": 341039, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=418080797066765189&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Delhi Technological University; University of Utah; Microsoft IDC+AI4Bharat", + "aff_domain": "gmail.com;cs.utah.edu;microsoft.com", + "email": "gmail.com;cs.utah.edu;microsoft.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "Delhi Technological University;University of Utah;Microsoft;AI4Bharat", + "aff_unique_dep": ";;Internet Development Center;", + "aff_unique_url": "https://www.dtu.ac.in;https://www.utah.edu;https://www.microsoft.com;", + "aff_unique_abbr": "DTU;Utah;Microsoft;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1+0", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.50", + "title": "Inducer-tuning: Connecting Prefix-tuning and Adapter-tuning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prefix-tuning, or more generally continuous prompt tuning, has become an essential paradigm of parameter-efficient transfer learning. Using a large pre-trained language model (PLM), prefix-tuning can obtain strong performance by training only a small portion of parameters. In this paper, we propose to understand and further develop prefix-tuning through the kernel lens. Specifically, we make an analogy between prefixes and inducing variables in kernel methods and hypothesize that prefixes serving as inducing variables would improve their overall mechanism. From the kernel estimator perspective, we suggest a new variant of prefix-tuning\u2014inducer-tuning, which shares the exact mechanism as prefix-tuning while leveraging the residual form found in adapter-tuning. This mitigates the initialization issue in prefix-tuning. 
Through comprehensive empirical experiments on natural language understanding and generation tasks, we demonstrate that inducer-tuning can close the performance gap between prefix-tuning and fine-tuning.", + "author": "Yifan Chen; Devamanyu Hazarika; Mahdi Namazifar; Yang Liu; Di Jin; Dilek Hakkani-Tur", + "authorids": "/y/yifan-chen/; /d/devamanyu-hazarika/; /m/mahdi-namazifar/; /y/yang-liu/; /d/di-jin/; /d/dilek-hakkani-tur/", + "bibtex": "@inproceedings{chen-etal-2022-inducer,\n title = \"Inducer-tuning: Connecting Prefix-tuning and Adapter-tuning\",\n author = \"Chen, Yifan and\n Hazarika, Devamanyu and\n Namazifar, Mahdi and\n Liu, Yang and\n Jin, Di and\n Hakkani-Tur, Dilek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.50/\",\n doi = \"10.18653/v1/2022.emnlp-main.50\",\n pages = \"793--808\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.50.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.50/", + "pdf_size": 1365599, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14271680438953856156&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois Urbana-Champaign; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "illinois.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "illinois.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Amazon", + "aff_unique_dep": ";Alexa AI", + "aff_unique_url": 
"https://illinois.edu;https://www.amazon.com", + "aff_unique_abbr": "UIUC;Amazon", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.325", + "title": "Inducing Generalizable and Interpretable Lexica", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Lexica \u2013 words and associated scores \u2013 are widely used as simple, interpretable, generalizable language features to predict sentiment, emotions, mental health, and personality. They also provide insight into the psychological features behind those moods and traits. Such lexica, historically created by human experts, are valuable to linguists, psychologists, and social scientists, but they take years of refinement and have limited coverage. In this paper, we investigate how the lexica that provide psycholinguistic insights could be computationally induced and how they should be assessed. We identify generalizability and interpretability as two essential properties of such lexica. We induce lexica using both context-oblivious and context-aware approaches, compare their predictive performance both within the training corpus and across various corpora, and evaluate their quality using crowd-worker assessment. We find that lexica induced from context-oblivious models are more generalizable and interpretable than those from more accurate context-aware transformer models. 
In addition, lexicon scores can identify explanatory words more reliably than a high performing transformer with feature-importance measures like SHAP.", + "author": "Yilin Geng; Zetian Wu; Roshan Santhosh; Tejas Srivastava; Lyle Ungar; Jo\u00e3o Sedoc", + "authorids": "/y/yilin-geng/; /z/zetian-wu/; /r/roshan-santhosh/; /t/tejas-srivastava/; /l/lyle-ungar/; /j/joao-sedoc/", + "bibtex": "@inproceedings{geng-etal-2022-inducing,\n title = \"Inducing Generalizable and Interpretable Lexica\",\n author = \"Geng, Yilin and\n Wu, Zetian and\n Santhosh, Roshan and\n Srivastava, Tejas and\n Ungar, Lyle and\n Sedoc, Jo{\\~a}o\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.325/\",\n doi = \"10.18653/v1/2022.findings-emnlp.325\",\n pages = \"4430--4448\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.325.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.325/", + "pdf_size": 540191, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16950074943024669321&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 0, + "aff": "University of Pennsylvania; Johns Hopkins University; University of Pennsylvania; University of Pennsylvania; University of Pennsylvania; New York University", + "aff_domain": "seas.upenn.edu;jhu.edu;seas.upenn.edu;seas.upenn.edu;cis.upenn.edu;stern.nyu.edu", + "email": "seas.upenn.edu;jhu.edu;seas.upenn.edu;seas.upenn.edu;cis.upenn.edu;stern.nyu.edu", + "github": "https://github.com/wwbp/embedding-lexica-creation", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;2", + "aff_unique_norm": "University of Pennsylvania;Johns Hopkins University;New York University", + 
"aff_unique_dep": ";;", + "aff_unique_url": "https://www.upenn.edu;https://www.jhu.edu;https://www.nyu.edu", + "aff_unique_abbr": "UPenn;JHU;NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.286", + "title": "Inductive Relation Prediction with Logical Reasoning Using Contrastive Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relation prediction in knowledge graphs (KGs) aims at predicting missing relations in incomplete triples, whereas the dominant embedding paradigm has a restriction on handling unseen entities during testing. In the real-world scenario, the inductive setting is more common because entities in the training process are finite. Previous methods capture an inductive ability by implicit logic in KGs. However, it would be challenging to preciously acquire entity-independent relational semantics of compositional logic rules and to deal with the deficient supervision of logic caused by the scarcity of relational semantics. To this end, we propose a novel graph convolutional network (GCN)-based model LogCo with logical reasoning by contrastive representations. LogCo firstly extracts enclosing subgraphs and relational paths between two entities to supply the entity-independence. Then a contrastive strategy for relational path instances and the subgraph is proposed for the issue of deficient supervision. The contrastive representations are learned for a joint training regime. Finally, prediction results and logic rules for reasoning are attained. 
Comprehensive experiments on twelve inductive datasets show that LogCo achieves outstanding performance comparing with state-of-the-art inductive relation prediction baselines.", + "author": "Yudai Pan; Jun Liu; Lingling Zhang; Tianzhe Zhao; Qika Lin; Xin Hu; Qianying Wang", + "authorids": "/y/yudai-pan/; /j/jun-liu/; /l/lingling-zhang/; /t/tianzhe-zhao/; /q/qika-lin/; /x/xin-hu/; /q/qianying-wang/", + "bibtex": "@inproceedings{pan-etal-2022-inductive,\n title = \"Inductive Relation Prediction with Logical Reasoning Using Contrastive Representations\",\n author = \"Pan, Yudai and\n Liu, Jun and\n Zhang, Lingling and\n Zhao, Tianzhe and\n Lin, Qika and\n Hu, Xin and\n Wang, Qianying\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.286/\",\n doi = \"10.18653/v1/2022.emnlp-main.286\",\n pages = \"4261--4274\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.286.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.286/", + "pdf_size": 10980765, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2831047283345859959&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science and Technology, Xi\u2019an Jiaotong University+National Engineering Lab for Big Data Analytics+Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering; School of Computer Science and Technology, Xi\u2019an Jiaotong University+National Engineering Lab for Big Data Analytics+Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering; School of Computer Science and Technology, Xi\u2019an Jiaotong University+Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering; School 
of Computer Science and Technology, Xi\u2019an Jiaotong University; School of Computer Science and Technology, Xi\u2019an Jiaotong University; School of Computer Science and Technology, Xi\u2019an Jiaotong University; Lenovo Research, Beijing, China", + "aff_domain": "foxmail.com;xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;foxmail.com;foxmail.com;lenovo.com", + "email": "foxmail.com;xjtu.edu.cn;xjtu.edu.cn;stu.xjtu.edu.cn;foxmail.com;foxmail.com;lenovo.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;0+1+2;0+2;0;0;0;3", + "aff_unique_norm": "Xi'an Jiaotong University;National Engineering Lab for Big Data Analytics;Shaanxi Provincial Key Laboratory of Big Data Knowledge Engineering;Lenovo Research", + "aff_unique_dep": "School of Computer Science and Technology;;Big Data Knowledge Engineering;", + "aff_unique_url": "https://www.xjtu.edu.cn;;;https://www.lenovo.com", + "aff_unique_abbr": "XJTU;;;Lenovo", + "aff_campus_unique_index": "0;0;0;0;0;0;2", + "aff_campus_unique": "Xi'an;;Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.188", + "title": "Inferring Implicit Relations in Complex Questions with Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A prominent challenge for modern language understanding systems is the ability to answer implicit reasoning questions, where the required reasoning steps for answering the question are not mentioned in the text explicitly. 
In this work, we investigate why current models struggle with implicit reasoning question answering (QA) tasks, by decoupling inference of reasoning steps from their execution. We define a new task of implicit relation inference and construct a benchmark, IMPLICITRELATIONS, where given a question, a model should output a list of concept-relation pairs, where the relations describe the implicit reasoning steps required for answering the question. Using IMPLICITRELATIONS, we evaluate models from the GPT-3 family and find that, while these models struggle on the implicit reasoning QA task, they often succeed at inferring implicit relations. This suggests that the challenge in implicit reasoning questions does not stem from the need to plan a reasoning strategy alone, but to do it while also retrieving and reasoning over relevant information.", + "author": "Uri Katz; Mor Geva; Jonathan Berant", + "authorids": "/u/uri-katz/; /m/mor-geva/; /j/jonathan-berant/", + "bibtex": "@inproceedings{katz-etal-2022-inferring,\n title = \"Inferring Implicit Relations in Complex Questions with Language Models\",\n author = \"Katz, Uri and\n Geva, Mor and\n Berant, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.188/\",\n doi = \"10.18653/v1/2022.findings-emnlp.188\",\n pages = \"2548--2566\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.188.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.188/", + "pdf_size": 935764, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12245125661259187040&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The Blavatnik School of Computer Science, Tel-Aviv 
University; Allen Institute for Artificial Intelligence; The Blavatnik School of Computer Science, Tel-Aviv University", + "aff_domain": "cs.tau.ac.il;allenai.org;cs.tau.ac.il", + "email": "cs.tau.ac.il;allenai.org;cs.tau.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Tel-Aviv University;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Blavatnik School of Computer Science;", + "aff_unique_url": "https://www.tau.ac.il;https://allenai.org", + "aff_unique_abbr": "TAU;AI2", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Tel-Aviv;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.findings-emnlp.520", + "title": "Inferring the Reader: Guiding Automated Story Generation with Commonsense Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer-based language model approaches to automated story generation currently provide state-of-the-art results. However, they still suffer from plot incoherence when generating narratives over time, and critically lack basic commonsense reasoning. Furthermore, existing methods generally focus only on single-character stories, or fail to track characters at all. To improve the coherence of generated narratives and to expand the scope of character-centric narrative generation, we introduce Commonsense-inference Augmented neural StoryTelling (CAST), a framework for introducing commonsense reasoning into the generation process with the option to model the interaction between multiple characters. 
We find that our CAST method produces significantly more coherent, on-topic, enjoyable and fluent stories than existing models in both the single-character and two-character settings in three storytelling domains.", + "author": "Xiangyu Peng; Siyan Li; Sarah Wiegreffe; Mark Riedl", + "authorids": "/x/xiangyu-peng/; /s/siyan-li/; /s/sarah-wiegreffe/; /m/mark-riedl/", + "bibtex": "@inproceedings{peng-etal-2022-inferring,\n title = \"Inferring the Reader: Guiding Automated Story Generation with Commonsense Reasoning\",\n author = \"Peng, Xiangyu and\n Li, Siyan and\n Wiegreffe, Sarah and\n Riedl, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.520/\",\n doi = \"10.18653/v1/2022.findings-emnlp.520\",\n pages = \"7008--7029\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.520.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.520/", + "pdf_size": 1563600, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2272585814996579362&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Georgia Institute of Technology\u2663; Georgia Institute of Technology\u2663; Allen Institute for Artificial Intelligence\u2020; Georgia Institute of Technology\u2663", + "aff_domain": "gatech.edu;gatech.edu;gmail.com;cc.gatech.edu", + "email": "gatech.edu;gatech.edu;gmail.com;cc.gatech.edu", + "github": "https://github.com/xiangyu-peng/CAST_public", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Georgia Institute of Technology;Allen Institute for Artificial Intelligence", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.gatech.edu;https://allenai.org", + 
"aff_unique_abbr": "Georgia Tech;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.104", + "title": "Infinite SCAN: An Infinite Model of Diachronic Semantic Change", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this study, we propose a Bayesian model that can jointly estimate the number of senses of words and their changes through time.The model combines a dynamic topic model on Gaussian Markov random fields with a logistic stick-breaking process that realizes Dirichlet process. In the experiments, we evaluated the proposed model in terms of interpretability, accuracy in estimating the number of senses, and tracking their changes using both artificial data and real data.We quantitatively verified that the model behaves as expected through evaluation using artificial data.Using the CCOHA corpus, we showed that our model outperforms the baseline model and investigated the semantic changes of several well-known target words.", + "author": "Seiichi Inoue; Mamoru Komachi; Toshinobu Ogiso; Hiroya Takamura; Daichi Mochihashi", + "authorids": "/s/seiichi-inoue/; /m/mamoru-komachi/; /t/toshinobu-ogiso/; /h/hiroya-takamura/; /d/daichi-mochihashi/", + "bibtex": "@inproceedings{inoue-etal-2022-infinite,\n title = \"Infinite {SCAN}: An Infinite Model of Diachronic Semantic Change\",\n author = \"Inoue, Seiichi and\n Komachi, Mamoru and\n Ogiso, Toshinobu and\n Takamura, Hiroya and\n Mochihashi, Daichi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.104/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.104\",\n pages = \"1605--1616\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.104.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.104/", + "pdf_size": 3691240, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17495191534124057800&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Tokyo Metropolitan University; Tokyo Metropolitan University; The National Institute for Japanese Language and Linguistics; The National Institute of Advanced Industrial Science and Technology; The Institute of Statistical Mathematics", + "aff_domain": "ed.tmu.ac.jp;tmu.ac.jp;ninjal.ac.jp;aist.go.jp;ism.ac.jp", + "email": "ed.tmu.ac.jp;tmu.ac.jp;ninjal.ac.jp;aist.go.jp;ism.ac.jp", + "github": "https://github.com/seiichiinoue/iscan", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;3", + "aff_unique_norm": "Tokyo Metropolitan University;National Institute for Japanese Language and Linguistics;National Institute of Advanced Industrial Science and Technology;The Institute of Statistical Mathematics", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.tmuc.ac.jp;https://www.ninjal.ac.jp;https://www.aist.go.jp;https://www.ism.ac.jp", + "aff_unique_abbr": "TMU;NINJAL;AIST;ISM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.findings-emnlp.58", + "title": "Influence Functions for Sequence Tagging Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Many standard tasks in NLP (e.g., Named Entity Recognition, Part-of-Speech tagging, and Semantic Role Labeling) are naturally framed as sequence tagging problems. However, there has been comparatively little work on interpretability methods for sequence tagging models. 
In this paper, we extend influence functions \u2014 which aim to trace predictions back to the training points that informed them \u2014 to sequence tagging tasks. We define the influence of a training instance segment as the effect that perturbing the labels within this segment has on a test segment level prediction. We provide an efficient approximation to compute this, and show that it tracks with the \u201ctrue\u201d segment influence (measured empirically). We show the practical utility of segment influence by using the method to identify noisy annotations in NER corpora.", + "author": "Sarthak Jain; Varun Manjunatha; Byron Wallace; Ani Nenkova", + "authorids": "/s/sarthak-jain/; /v/varun-manjunatha/; /b/byron-c-wallace/; /a/ani-nenkova/", + "bibtex": "@inproceedings{jain-etal-2022-influence,\n title = \"Influence Functions for Sequence Tagging Models\",\n author = \"Jain, Sarthak and\n Manjunatha, Varun and\n Wallace, Byron and\n Nenkova, Ani\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.58/\",\n doi = \"10.18653/v1/2022.findings-emnlp.58\",\n pages = \"824--839\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.58.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.58/", + "pdf_size": 748821, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3834240141839214469&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Northeastern University; Adobe Research; Northeastern University; Adobe Research", + "aff_domain": "northeastern.edu;adobe.com;northeastern.edu;adobe.com", + "email": "northeastern.edu;adobe.com;northeastern.edu;adobe.com", + "github": 
"https://github.com/successar/Segment_Influence_Functions", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Northeastern University;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www.northeastern.edu;https://research.adobe.com", + "aff_unique_abbr": "NEU;Adobe", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.223", + "title": "InfoCSE: Information-aggregated Contrastive Learning of Sentence Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Contrastive learning has been extensively studied in sentence embedding learning, which assumes that the embeddings of different views of the same sentence are closer. The constraint brought by this assumption is weak, and a good sentence representation should also be able to reconstruct the original sentence fragments. Therefore, this paper proposes an information-aggregated contrastive learning framework for learning unsupervised sentence embeddings, termed InfoCSE.InfoCSE forces the representation of [CLS] positions to aggregate denser sentence information by introducing an additional Masked language model task and a well-designed network. We evaluate the proposed InfoCSE on several benchmark datasets w.r.t the semantic text similarity (STS) task. 
Experimental results show that InfoCSE outperforms SimCSE by an average Spearman correlation of 2.60% on BERT-base, and 1.77% on BERT-large, achieving state-of-the-art results among unsupervised sentence representation learning methods.", + "author": "Xing Wu; Chaochen Gao; Zijia Lin; Jizhong Han; Zhongyuan Wang; Songlin Hu", + "authorids": "/x/xing-wu/; /c/chaochen-gao/; /z/zijia-lin/; /j/jizhong-han/; /z/zhongyuan-wang/; /s/songlin-hu/", + "bibtex": "@inproceedings{wu-etal-2022-infocse,\n title = \"{I}nfo{CSE}: Information-aggregated Contrastive Learning of Sentence Embeddings\",\n author = \"Wu, Xing and\n Gao, Chaochen and\n Lin, Zijia and\n Han, Jizhong and\n Wang, Zhongyuan and\n Hu, Songlin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.223/\",\n doi = \"10.18653/v1/2022.findings-emnlp.223\",\n pages = \"3060--3070\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.223.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.223/", + "pdf_size": 457568, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2087759464066808177&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences+Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences; Kuaishou Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese 
Academy of Sciences", + "aff_domain": "iie.ac.cn;iie.ac.cn;tsinghua.org.cn;iie.ac.cn;kuaishou.com;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;tsinghua.org.cn;iie.ac.cn;kuaishou.com;iie.ac.cn", + "github": "github.com/caskcsg/sentemb/tree/main/InfoCSE", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0+1;2;0;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Kuaishou Technology", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.kuaishou.com", + "aff_unique_abbr": "CAS;UCAS;Kuaishou", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.395", + "title": "InforMask: Unsupervised Informative Masking for Language Model Pretraining", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Masked language modeling is widely used for pretraining large language models for natural language understanding (NLU). However, random masking is suboptimal, allocating an equal masking rate for all tokens. In this paper, we propose InforMask, a new unsupervised masking strategy for training masked language models. InforMask exploits Pointwise Mutual Information (PMI) to select the most informative tokens to mask. We further propose two optimizations for InforMask to improve its efficiency. 
With a one-off preprocessing step, InforMask outperforms random masking and previously proposed masking strategies on the factual recall benchmark LAMA and the question answering benchmark SQuAD v1 and v2.", + "author": "Nafis Sadeq; Canwen Xu; Julian McAuley", + "authorids": "/n/nafis-sadeq/; /c/canwen-xu/; /j/julian-mcauley/", + "bibtex": "@inproceedings{sadeq-etal-2022-informask,\n title = \"{I}nfor{M}ask: Unsupervised Informative Masking for Language Model Pretraining\",\n author = \"Sadeq, Nafis and\n Xu, Canwen and\n McAuley, Julian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.395/\",\n doi = \"10.18653/v1/2022.emnlp-main.395\",\n pages = \"5866--5878\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.395.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.395/", + "pdf_size": 482186, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4012966609608672145&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "https://github.com/NafisSadeq/InforMask", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.280", + "title": 
"Information-Theoretic Text Hallucination Reduction for Video-grounded Dialogue", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Video-grounded Dialogue (VGD) aims to decode an answer sentence to a question regarding a given video and dialogue context. Despite the recent success of multi-modal reasoning to generate answer sentences, existing dialogue systems still suffer from a text hallucination problem, which denotes indiscriminate text-copying from input texts without an understanding of the question. This is due to learning spurious correlations from the fact that answer sentences in the dataset usually include the words of input texts, thus the VGD system excessively relies on copying words from input texts by hoping those words to overlap with ground-truth texts. Hence, we design Text Hallucination Mitigating (THAM) framework, which incorporates Text Hallucination Regularization (THR) loss derived from the proposed information-theoretic text hallucination measurement approach. 
Applying THAM with current dialogue systems validates the effectiveness on VGD benchmarks (i.e., AVSD@DSTC7 and AVSD@DSTC8) and shows enhanced interpretability.", + "author": "Sunjae Yoon; Eunseop Yoon; Hee Suk Yoon; Junyeong Kim; Chang Yoo", + "authorids": "/s/sunjae-yoon/; /e/eunseop-yoon/; /h/hee-suk-yoon/; /j/junyeong-kim/; /c/chang-yoo/", + "bibtex": "@inproceedings{yoon-etal-2022-information,\n title = \"Information-Theoretic Text Hallucination Reduction for Video-grounded Dialogue\",\n author = \"Yoon, Sunjae and\n Yoon, Eunseop and\n Yoon, Hee Suk and\n Kim, Junyeong and\n Yoo, Chang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.280/\",\n doi = \"10.18653/v1/2022.emnlp-main.280\",\n pages = \"4182--4193\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.280.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.280/", + "pdf_size": 620864, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6090132399199567772&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff": "Korea Advanced Institute of Science and Technology (KAIST)+Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST)+Korea Advanced Institute of Science and Technology (KAIST); Korea Advanced Institute of Science and Technology (KAIST)+Korea Advanced Institute of Science and Technology (KAIST); Chung-Ang University; Korea Advanced Institute of Science and Technology (KAIST)+Korea Advanced Institute of Science and Technology (KAIST)", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;cau.ac.kr;kaist.ac.kr", + "email": 
"kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;cau.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0+0;1;0+0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Chung-Ang University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.kaist.ac.kr;http://www.cau.ac.kr", + "aff_unique_abbr": "KAIST;CAU", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.65", + "title": "Information-Transport-based Policy for Simultaneous Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Simultaneous translation (ST) outputs translation while receiving the source inputs, and hence requires a policy to determine whether to translate a target token or wait for the next source token. The major challenge of ST is that each target token can only be translated based on the current received source tokens, where the received source information will directly affect the translation quality. So naturally, how much source information is received for the translation of the current target token is supposed to be the pivotal evidence for the ST policy to decide between translating and waiting. In this paper, we treat the translation as information transport from source to target and accordingly propose an Information-Transport-based Simultaneous Translation (ITST). ITST quantifies the transported information weight from each source token to the current target token, and then decides whether to translate the target token according to its accumulated received information. 
Experiments on both text-to-text ST and speech-to-text ST (a.k.a., streaming speech translation) tasks show that ITST outperforms strong baselines and achieves state-of-the-art performance.", + "author": "Shaolei Zhang; Yang Feng", + "authorids": "/s/shaolei-zhang/; /y/yang-feng/", + "bibtex": "@inproceedings{zhang-feng-2022-information,\n title = \"Information-Transport-based Policy for Simultaneous Translation\",\n author = \"Zhang, Shaolei and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.65/\",\n doi = \"10.18653/v1/2022.emnlp-main.65\",\n pages = \"992--1013\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.65.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.65/", + "pdf_size": 5193560, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10657761885772936460&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 7, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn", + "github": "https://github.com/ictnlp/ITST", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": 
"http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.820", + "title": "Injecting Domain Knowledge in Language Models for Task-oriented Dialogue Systems", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (PLM) have advanced the state-of-the-art across NLP applications, but lack domain-specific knowledge that does not naturally occur in pre-training data. Previous studies augmented PLMs with symbolic knowledge for different downstream NLP tasks. However, knowledge bases (KBs) utilized in these studies are usually large-scale and static, in contrast to small, domain-specific, and modifiable knowledge bases that are prominent in real-world task-oriented dialogue (TOD) systems. In this paper, we showcase the advantages of injecting domain-specific knowledge prior to fine-tuning on TOD tasks. To this end, we utilize light-weight adapters that can be easily integrated with PLMs and serve as a repository for facts learned from different KBs. To measure the efficacy of proposed knowledge injection methods, we introduce Knowledge Probing using Response Selection (KPRS) \u2013 a probe designed specifically for TOD models. 
Experiments on KPRS and the response generation task show improvements of knowledge injection with adapters over strong baselines.", + "author": "Denis Emelin; Daniele Bonadiman; Sawsan Alqahtani; Yi Zhang; Saab Mansour", + "authorids": "/d/denis-emelin/; /d/daniele-bonadiman/; /s/sawsan-alqahtani/; /y/yi-zhang/; /s/saab-mansour/", + "bibtex": "@inproceedings{emelin-etal-2022-injecting,\n title = \"Injecting Domain Knowledge in Language Models for Task-oriented Dialogue Systems\",\n author = \"Emelin, Denis and\n Bonadiman, Daniele and\n Alqahtani, Sawsan and\n Zhang, Yi and\n Mansour, Saab\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.820/\",\n doi = \"10.18653/v1/2022.emnlp-main.820\",\n pages = \"11962--11974\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.820.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.820/", + "pdf_size": 418928, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13519758113594958099&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Edinburgh; AWS AI Labs; Princess Nourah Bint Abdulrahman + National Center of AI; AWS AI Labs; AWS AI Labs", + "aff_domain": "gmail.com;amazon.com;pnu.edu.sa;amazon.com;amazon.com", + "email": "gmail.com;amazon.com;pnu.edu.sa;amazon.com;amazon.com", + "github": "https://github.com/amazon-research/domain-knowledge-injection", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2+3;1;1", + "aff_unique_norm": "University of Edinburgh;Amazon Web Services;Princess Nourah Bint Abdulrahman University;National Center of AI", + "aff_unique_dep": ";AWS AI Labs;;", + "aff_unique_url": 
"https://www.ed.ac.uk;https://aws.amazon.com;https://pnu.edu.sa;", + "aff_unique_abbr": "Edinburgh;AWS;PNU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;1;1", + "aff_country_unique": "United Kingdom;United States;Saudi Arabia;" + }, + { + "id": "2022.emnlp-main.773", + "title": "Instance Regularization for Discriminative Language Model Pre-training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Discriminative pre-trained language models (PrLMs) can be generalized as denoising auto-encoders that work with two procedures, ennoising and denoising. First, an ennoising process corrupts texts with arbitrary noising functions to construct training instances. Then, a denoising language model is trained to restore the corrupted tokens. Existing studies have made progress by optimizing independent strategies of either ennoising or denosing. They treat training instances equally throughout the training process, with little attention on the individual contribution of those instances. To model explicit signals of instance contribution, this work proposes to estimate the complexity of restoring the original sentences from corrupted ones in language model pre-training. The estimations involve the corruption degree in the ennoising data construction process and the prediction confidence in the denoising counterpart. Experimental results on natural language understanding and reading comprehension benchmarks show that our approach improves pre-training efficiency, effectiveness, and robustness. 
Code is publicly available at https://github.com/cooelf/InstanceReg.", + "author": "Zhuosheng Zhang; Hai Zhao; Ming Zhou", + "authorids": "/z/zhuosheng-zhang/; /h/hai-zhao/; /m/ming-zhou/", + "bibtex": "@inproceedings{zhang-etal-2022-instance,\n title = \"Instance Regularization for Discriminative Language Model Pre-training\",\n author = \"Zhang, Zhuosheng and\n Zhao, Hai and\n Zhou, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.773/\",\n doi = \"10.18653/v1/2022.emnlp-main.773\",\n pages = \"11255--11265\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.773.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.773/", + "pdf_size": 935025, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1093697598619397315&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University + Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University + Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Langboat Technology", + "aff_domain": "sjtu.edu.cn;cs.sjtu.edu.cn;chuangxin.com", + "email": "sjtu.edu.cn;cs.sjtu.edu.cn;chuangxin.com", + "github": "https://github.com/cooelf/InstanceReg", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Langboat Technology", + "aff_unique_dep": "Department of Computer Science and 
Engineering;", + "aff_unique_url": "https://www.sjtu.edu.cn;", + "aff_unique_abbr": "SJTU;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.285", + "title": "Instance-Guided Prompt Learning for Few-Shot Text Matching", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Few-shot text matching is a more practical technique in natural language processing (NLP) to determine whether two texts are semantically identical. They primarily design patterns to reformulate text matching into a pre-trained task with uniform prompts across all instances. But they fail to take into account the connection between prompts and instances. This paper argues that dynamically strengthening the correlation between particular instances and the prompts is necessary because fixed prompts cannot adequately fit all diverse instances in inference. We suggest IGATE: Instance-Guided prompt leArning for few-shoT tExt matching, a novel pluggable prompt learning method. The gate mechanism used by IGATE, which is between the embedding and the PLM encoders, makes use of the semantics of instances to regulate the effects of the gate on the prompt tokens. The experimental findings show that IGATE achieves SOTA performance on MRPC and QQP, outperforming strong baselines. 
GitHub will host the release of codes.", + "author": "Jia Du; Xuanyu Zhang; Siyi Wang; Kai Wang; Yanquan Zhou; Lei Li; Qing Yang; Dongliang Xu", + "authorids": "/j/jia-du/; /x/xuanyu-zhang/; /s/siyi-wang/; /k/kai-wang/; /y/yanquan-zhou/; /l/lei-li/; /q/qing-yang/; /d/dongliang-xu/", + "bibtex": "@inproceedings{du-etal-2022-instance,\n title = \"Instance-Guided Prompt Learning for Few-Shot Text Matching\",\n author = \"Du, Jia and\n Zhang, Xuanyu and\n Wang, Siyi and\n Wang, Kai and\n Zhou, Yanquan and\n Li, Lei and\n Yang, Qing and\n Xu, Dongliang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.285/\",\n doi = \"10.18653/v1/2022.findings-emnlp.285\",\n pages = \"3880--3886\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.285.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.285/", + "pdf_size": 288698, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9120749857448173790&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Beijing University of Posts and Telecommunications; Du Xiaoman Financial; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Du Xiaoman Financial; Du Xiaoman Financial", + "aff_domain": "bupt.edu.cn;duxiaoman.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;duxiaoman.com;duxiaoman.com", + "email": "bupt.edu.cn;duxiaoman.com;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;bupt.edu.cn;duxiaoman.com;duxiaoman.com", + "github": "https://github.com/Du-Jia/IGATE", + "project": "", + "author_num": 8, + "aff_unique_index": 
"0;1;0;0;0;0;1;1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Du Xiaoman Financial", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.duxiaoman.com", + "aff_unique_abbr": "BUPT;DXF", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.33", + "title": "InstructDial: Improving Zero and Few-shot Generalization in Dialogue through Instruction Tuning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Instruction tuning is an emergent paradigm in NLP wherein natural language instructions are leveraged with language models to induce zero-shot performance on unseen tasks. Dialogue is an especially interesting area in which to explore instruction tuning because dialogue systems perform multiple kinds of tasks related to language (e.g., natural language understanding and generation, domain-specific interaction), yet instruction tuning has not been systematically explored for dialogue-related tasks. We introduce InstructDial, an instruction tuning framework for dialogue, which consists of a repository of 48 diverse dialogue tasks in a unified text-to-text format created from 59 openly available dialogue datasets. We explore cross-task generalization ability on models tuned on InstructDial across diverse dialogue tasks. Our analysis reveals that InstructDial enables good zero-shot performance on unseen datasets and tasks such as dialogue evaluation and intent detection, and even better performance in a few-shot setting. To ensure that models adhere to instructions, we introduce novel meta-tasks. 
We establish benchmark zero-shot and few-shot performance of models trained using the proposed framework on multiple dialogue tasks.", + "author": "Prakhar Gupta; Cathy Jiao; Yi-Ting Yeh; Shikib Mehri; Maxine Eskenazi; Jeffrey Bigham", + "authorids": "/p/prakhar-gupta/; /c/cathy-jiao/; /y/yi-ting-yeh/; /s/shikib-mehri/; /m/maxine-eskenazi/; /j/jeffrey-p-bigham/", + "bibtex": "@inproceedings{gupta-etal-2022-instructdial,\n title = \"{I}nstruct{D}ial: Improving Zero and Few-shot Generalization in Dialogue through Instruction Tuning\",\n author = \"Gupta, Prakhar and\n Jiao, Cathy and\n Yeh, Yi-Ting and\n Mehri, Shikib and\n Eskenazi, Maxine and\n Bigham, Jeffrey\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.33/\",\n doi = \"10.18653/v1/2022.emnlp-main.33\",\n pages = \"505--525\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.33.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.33/", + "pdf_size": 1631417, + "gs_citation": 81, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17369857370102458272&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Human-Computer Interaction Institute, Carnegie Mellon University + Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": 
"cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/prakharguptaz/Instructdial", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0+0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0;0;0;0+0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.14", + "title": "Interpreting Language Models with Contrastive Explanations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Model interpretability methods are often used to explain NLP model decisions on tasks such as text classification, where the output space is relatively small. However, when applied to language generation, where the output space often consists of tens of thousands of tokens, these methods are unable to provide informative explanations. Language models must consider various features to predict a token, such as its part of speech, number, tense, or semantics.Existing explanation methods conflate evidence for all these features into a single explanation, which is less interpretable for human understanding.To disentangle the different decisions in language modeling, we focus on explaining language models contrastively: we look for salient input tokens that explain why the model predicted one token instead of another. We demonstrate that contrastive explanations are quantifiably better than non-contrastive explanations in verifying major grammatical phenomena, and that they significantly improve contrastive model simulatability for human observers. 
We also identify groups of contrastive decisions where the model uses similar evidence, and we are able to characterize what input tokens models use during various language generation decisions.", + "author": "Kayo Yin; Graham Neubig", + "authorids": "/k/kayo-yin/; /g/graham-neubig/", + "bibtex": "@inproceedings{yin-neubig-2022-interpreting,\n title = \"Interpreting Language Models with Contrastive Explanations\",\n author = \"Yin, Kayo and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.14/\",\n doi = \"10.18653/v1/2022.emnlp-main.14\",\n pages = \"184--198\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.14.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.14/", + "pdf_size": 802340, + "gs_citation": 99, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2553776721569707343&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "University of California, Berkeley + Carnegie Mellon University; Carnegie Mellon University", + "aff_domain": "berkeley.edu;cs.cmu.edu", + "email": "berkeley.edu;cs.cmu.edu", + "github": "https://github.com/kayoyin/interpret-lm", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "University of California, Berkeley;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.berkeley.edu;https://www.cmu.edu", + "aff_unique_abbr": "UC Berkeley;CMU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Berkeley;", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.799", + "title": "Interventional Training for Out-Of-Distribution 
Natural Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Out-of-distribution (OOD) settings are used to measure a model\u2019s performance when the distribution of the test data is different from that of the training data. NLU models are known to suffer in OOD. We study this issue from the perspective of causality, which sees confounding bias as the reason for models to learn spurious correlations. While a common solution is to perform intervention, existing methods handle only known and single confounder, but in many NLU tasks the confounders can be both unknown and multifactorial. In this paper, we propose a novel interventional training method called Bottom-up Automatic Intervention (BAI) that performs multi-granular intervention with identified multifactorial confounders. Our experiments on three NLU tasks, namely, natural language inference, fact verification and paraphrase identification, show the effectiveness of BAI for tackling OOD settings.", + "author": "Sicheng Yu; Jing Jiang; Hao Zhang; Yulei Niu; Qianru Sun; Lidong Bing", + "authorids": "/s/sicheng-yu/; /j/jing-jiang/; /h/hao-zhang/; /y/yulei-niu/; /q/qianru-sun/; /l/lidong-bing/", + "bibtex": "@inproceedings{yu-etal-2022-interventional,\n title = \"Interventional Training for Out-Of-Distribution Natural Language Understanding\",\n author = \"Yu, Sicheng and\n Jiang, Jing and\n Zhang, Hao and\n Niu, Yulei and\n Sun, Qianru and\n Bing, Lidong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.799/\",\n doi = \"10.18653/v1/2022.emnlp-main.799\",\n pages = \"11627--11638\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.799.pdf", 
+ "site": "https://aclanthology.org/2022.emnlp-main.799/", + "pdf_size": 608128, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4960739199313376016&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Singapore Management University+DAMO Academy, Alibaba Group; Singapore Management University; Nanyang Technological University; Columbia University+DAMO Academy, Alibaba Group; Singapore Management University; DAMO Academy, Alibaba Group", + "aff_domain": "phdcs.smu.edu.sg;smu.edu.sg;outlook.com;gmail.com;smu.edu.sg;alibaba-inc.com", + "email": "phdcs.smu.edu.sg;smu.edu.sg;outlook.com;gmail.com;smu.edu.sg;alibaba-inc.com", + "github": "https://github.com/PluviophileYU/BAI", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;2;3+1;0;1", + "aff_unique_norm": "Singapore Management University;Alibaba Group;Nanyang Technological University;Columbia University", + "aff_unique_dep": ";DAMO Academy;;", + "aff_unique_url": "https://www.smu.edu.sg;https://www.alibaba-group.com;https://www.ntu.edu.sg;https://www.columbia.edu", + "aff_unique_abbr": "SMU;Alibaba;NTU;Columbia", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;2+1;0;1", + "aff_country_unique": "Singapore;China;United States" + }, + { + "id": "2022.emnlp-main.619", + "title": "Intriguing Properties of Compression on Multilingual Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multilingual models are often particularly dependent on scaling to generalize to a growing number of languages. Compression techniques are widely relied upon to reconcile the growth in model size with real world resource constraints, but compression can have a disparate effect on model performance for low-resource languages. It is thus crucial to understand the trade-offs between scale, multilingualism, and compression. 
In this work, we propose an experimental framework to characterize the impact of sparsifying multilingual pre-trained language models during fine-tuning.Applying this framework to mBERT named entity recognition models across 40 languages, we find that compression confers several intriguing and previously unknown generalization properties. In contrast to prior findings, we find that compression may improve model robustness over dense models. We additionally observe that under certain sparsification regimes compression may aid, rather than disproportionately impact the performance of low-resource languages.", + "author": "Kelechi Ogueji; Orevaoghene Ahia; Gbemileke Onilude; Sebastian Gehrmann; Sara Hooker; Julia Kreutzer", + "authorids": "/k/kelechi-ogueji/; /o/orevaoghene-ahia/; /g/gbemileke-onilude/; /s/sebastian-gehrmann/; /s/sara-hooker/; /j/julia-kreutzer/", + "bibtex": "@inproceedings{ogueji-etal-2022-intriguing,\n title = \"Intriguing Properties of Compression on Multilingual Models\",\n author = \"Ogueji, Kelechi and\n Ahia, Orevaoghene and\n Onilude, Gbemileke and\n Gehrmann, Sebastian and\n Hooker, Sara and\n Kreutzer, Julia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.619/\",\n doi = \"10.18653/v1/2022.emnlp-main.619\",\n pages = \"9092--9110\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.619.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.619/", + "pdf_size": 385051, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12844480190165463184&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of Waterloo; University of Washington; Cohere For AI Community; 
Google Research; Cohere For AI; Google Research", + "aff_domain": "uwaterloo.ca;cs.washington.edu;gmail.com;google.com;cohere.com;google.com", + "email": "uwaterloo.ca;cs.washington.edu;gmail.com;google.com;cohere.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;2;3", + "aff_unique_norm": "University of Waterloo;University of Washington;Cohere;Google", + "aff_unique_dep": ";;AI Community;Google Research", + "aff_unique_url": "https://uwaterloo.ca;https://www.washington.edu;https://cohere.ai;https://research.google", + "aff_unique_abbr": "UW;UW;Cohere;Google Research", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;1;1;1;1;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-main.387", + "title": "Invariant Language Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Modern pretrained language models are critical components of NLP pipelines. Yet, they suffer from spurious correlations, poor out-of-domain generalization, and biases.Inspired by recent progress in causal machine learning, in particular the invariant risk minimization (IRM) paradigm, we propose invariant language modeling, a framework for learning invariant representations that generalize better across multiple environments. 
In particular, we adapt a game-theoretic implementation of IRM (IRM-games) to language models, where the invariance emerges from a specific training schedule in which all the environments compete to optimize their own environment-specific loss by updating subsets of the model in a round-robin fashion.We focused on controlled experiments to precisely demonstrate the ability of our method to (i) remove structured noise, (ii) ignore specific spurious correlations without affecting global performance, and (iii) achieve better out-of-domain generalization.These benefits come with a negligible computational overhead compared to standard training, do not require changing the local loss, and can be applied to any language model. We believe this framework is promising to help mitigate spurious correlations and biases in language models.", + "author": "Maxime Peyrard; Sarvjeet Ghotra; Martin Josifoski; Vidhan Agarwal; Barun Patra; Dean Carignan; Emre Kiciman; Saurabh Tiwary; Robert West", + "authorids": "/m/maxime-peyrard/; /s/sarvjeet-ghotra/; /m/martin-josifoski/; /v/vidhan-agarwal/; /b/barun-patra/; /d/dean-carignan/; /e/emre-kiciman/; /s/saurabh-tiwary/; /r/robert-west/", + "bibtex": "@inproceedings{peyrard-etal-2022-invariant,\n title = \"Invariant Language Modeling\",\n author = \"Peyrard, Maxime and\n Ghotra, Sarvjeet and\n Josifoski, Martin and\n Agarwal, Vidhan and\n Patra, Barun and\n Carignan, Dean and\n Kiciman, Emre and\n Tiwary, Saurabh and\n West, Robert\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.387/\",\n doi = \"10.18653/v1/2022.emnlp-main.387\",\n pages = \"5728--5743\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.387.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.387/", + "pdf_size": 531919, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2635225088933460125&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "EPFL; Microsoft Corporation; EPFL; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; EPFL", + "aff_domain": "epfl.ch;epfl.ch;epfl.ch;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "epfl.ch;epfl.ch;epfl.ch;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0;1;1;1;1;1;0", + "aff_unique_norm": "Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.epfl.ch;https://www.microsoft.com", + "aff_unique_abbr": "EPFL;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1;1;1;1;1;0", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": "2022.findings-emnlp.368", + "title": "Invernet: An Inversion Attack Framework to Infer Fine-Tuning Datasets through Word Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word embedding aims to learn the dense representation of words and has become a regular input preparation in many NLP tasks. Due to the data and computation intensive nature of learning embeddings from scratch, a more affordable way is to borrow the pretrained embedding available in public and fine-tune the embedding through a domain specific downstream dataset. A privacy concern can arise if a malicious owner of the pretrained embedding gets access to the fine-tuned embedding and tries to infer the critical information from the downstream datasets. 
In this study, we propose a novel embedding inversion framework called Invernet that materializes the privacy concern by inferring the context distribution in the downstream dataset, which can lead to key information breach. With extensive experimental studies on two real-world news datasets: Antonio Gulli\u2019s News and New York Times, we validate the feasibility of proposed privacy attack and demonstrate the effectiveness of Invernet on inferring downstream datasets based on multiple word embedding methods.", + "author": "Ishrak Hayet; Zijun Yao; Bo Luo", + "authorids": "/i/ishrak-hayet/; /z/zijun-yao/; /b/bo-luo/", + "bibtex": "@inproceedings{hayet-etal-2022-invernet,\n title = \"Invernet: An Inversion Attack Framework to Infer Fine-Tuning Datasets through Word Embeddings\",\n author = \"Hayet, Ishrak and\n Yao, Zijun and\n Luo, Bo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.368/\",\n doi = \"10.18653/v1/2022.findings-emnlp.368\",\n pages = \"5009--5018\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.368.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.368/", + "pdf_size": 1492813, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14253766863364547685&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "University of Kansas; University of Kansas; University of Kansas", + "aff_domain": "ku.edu;ku.edu;ku.edu", + "email": "ku.edu;ku.edu;ku.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Kansas", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ku.edu", + "aff_unique_abbr": "KU", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.118", + "title": "Investigating Ensemble Methods for Model Robustness Improvement of Text Classifiers", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large pre-trained language models have shown remarkable performance over the past few years. These models, however, sometimes learn superficial features from the dataset and cannot generalize to the distributions that are dissimilar to the training scenario. There have been several approaches proposed to reduce model\u2019s reliance on these bias features which can improve model robustness in the out-of-distribution setting. However, existing methods usually use a fixed low-capacity model to deal with various bias features, which ignore the learnability of those features. In this paper, we analyze a set of existing bias features and demonstrate there is no single model that works best for all the cases. 
We further show that by choosing an appropriate bias model, we can obtain a better robustness result than baselines with a more sophisticated model design.", + "author": "Jieyu Zhao; Xuezhi Wang; Yao Qin; Jilin Chen; Kai-Wei Chang", + "authorids": "/j/jieyu-zhao/; /x/xuezhi-wang/; /y/yao-qin/; /j/jilin-chen/; /k/kai-wei-chang/", + "bibtex": "@inproceedings{zhao-etal-2022-investigating,\n title = \"Investigating Ensemble Methods for Model Robustness Improvement of Text Classifiers\",\n author = \"Zhao, Jieyu and\n Wang, Xuezhi and\n Qin, Yao and\n Chen, Jilin and\n Chang, Kai-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.118/\",\n doi = \"10.18653/v1/2022.findings-emnlp.118\",\n pages = \"1634--1640\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.118.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.118/", + "pdf_size": 235287, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=273356927207811003&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of California, Los Angeles; Google Research; Google Research; Google Research; University of California, Los Angeles", + "aff_domain": "ucla.edu;google.com;google.com;google.com;cs.ucla.edu", + "email": "ucla.edu;google.com;google.com;google.com;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "University of California, Los Angeles;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.ucla.edu;https://research.google", + "aff_unique_abbr": "UCLA;Google Research", + "aff_campus_unique_index": "0;1;1;1;0", + 
"aff_campus_unique": "Los Angeles;Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.432", + "title": "Investigating the Benefits of Free-Form Rationales", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Free-form rationales aim to aid model interpretability by supplying the background knowledge that can help understand model decisions. Crowdsourced rationales are provided for commonsense QA instances in popular datasets such as CoS-E and ECQA, but their utility remains under-investigated. We present human studies which show that ECQA rationales indeed provide additional background information to understand a decision, while over 88% of CoS-E rationales do not. Inspired by this finding, we ask: can the additional context provided by free-form rationales benefit models, similar to human users? We investigate the utility of rationales as an additional source of supervision, by varying the quantity and quality of rationales during training. After controlling for instances where rationales leak the correct answer while not providing additional background knowledge, we find that incorporating only 5% of rationales during training can boost model performance by 47.22% for CoS-E and 57.14% for ECQA during inference. 
Moreover, we also show that rationale quality matters: compared to crowdsourced rationales, T5-generated rationales provide not only weaker supervision to models, but are also not helpful for humans in aiding model interpretability.", + "author": "Jiao Sun; Swabha Swayamdipta; Jonathan May; Xuezhe Ma", + "authorids": "/j/jiao-sun/; /s/swabha-swayamdipta/; /j/jonathan-may/; /x/xuezhe-ma/", + "bibtex": "@inproceedings{sun-etal-2022-investigating,\n title = \"Investigating the Benefits of Free-Form Rationales\",\n author = \"Sun, Jiao and\n Swayamdipta, Swabha and\n May, Jonathan and\n Ma, Xuezhe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.432/\",\n doi = \"10.18653/v1/2022.findings-emnlp.432\",\n pages = \"5867--5882\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.432.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.432/", + "pdf_size": 1005031, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=638457869775210237&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "University of Southern California; University of Southern California; University of Southern California + Information Sciences Institute; University of Southern California + Information Sciences Institute", + "aff_domain": "usc.edu;usc.edu;isi.edu;isi.edu", + "email": "usc.edu;usc.edu;isi.edu;isi.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+0;0+0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Los Angeles;", 
+ "aff_country_unique_index": "0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.370", + "title": "Investigating the Robustness of Natural Language Generation from Logical Forms via Counterfactual Samples", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The aim of Logic2Text is to generate controllable and faithful texts conditioned on tables and logical forms, which not only requires a deep understanding of the tables and logical forms, but also warrants symbolic reasoning over the tables according to the logical forms. State-of-the-art methods based on pre-trained models have achieved remarkable performance on the standard test dataset. However, we question whether these methods really learn how to perform logical reasoning, rather than just relying on the spurious correlations between the headers of the tables and operators of the logical form. To verify this hypothesis, we manually construct a set of counterfactual samples, which modify the original logical forms to generate counterfactual logical forms with rare co-occurred headers and operators and corresponding counterfactual references. SOTA methods give much worse results on these counterfactual samples compared with the results on the original test dataset, which verifies our hypothesis. To deal with this problem, we firstly analyze this bias from a causal perspective, based on which we propose two approaches to reduce the model\u2019s reliance on the shortcut. The first one incorporates the hierarchical structure of the logical forms into the model. The second one exploits automatically generated counterfactual data for training. Automatic and manual experimental results on the original test dataset and counterfactual dataset show that our method is effective to alleviate the spurious correlation. 
Our work points out the weakness of current methods and takes a further step toward developing Logic2Text models with real logical reasoning ability.", + "author": "Chengyuan Liu; Leilei Gan; Kun Kuang; Fei Wu", + "authorids": "/c/chengyuan-liu/; /l/leilei-gan/; /k/kun-kuang/; /f/fei-wu/", + "bibtex": "@inproceedings{liu-etal-2022-investigating,\n title = \"Investigating the Robustness of Natural Language Generation from Logical Forms via Counterfactual Samples\",\n author = \"Liu, Chengyuan and\n Gan, Leilei and\n Kuang, Kun and\n Wu, Fei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.370/\",\n doi = \"10.18653/v1/2022.emnlp-main.370\",\n pages = \"5499--5512\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.370.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.370/", + "pdf_size": 926993, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12070990044054805968&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "School of Software Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University+Shanghai Institute for Advanced Study of Zhejiang University; College of Computer Science and Technology, Zhejiang University+Shanghai AI Laboratory; College of Computer Science and Technology, Zhejiang University+Shanghai Institute for Advanced Study of Zhejiang University+Shanghai AI Laboratory", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+0;0+1;0+0+1", + "aff_unique_norm": "Zhejiang 
University;Shanghai AI Laboratory", + "aff_unique_dep": "School of Software Technology;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.shanghai-ai-lab.com", + "aff_unique_abbr": "ZJU;SAIL", + "aff_campus_unique_index": "1;;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0;0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.90", + "title": "Is MultiWOZ a Solved Task? An Interactive TOD Evaluation Framework with User Simulator", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Task-Oriented Dialogue (TOD) systems are drawing more and more attention in recent studies.Current methods focus on constructing pre-trained models or fine-tuning strategies while the evaluation of TOD is limited by a policy mismatch problem.That is, during evaluation, the user utterances are from the annotated dataset while these utterances should interact with previous responses which can have many alternatives besides annotated texts.Therefore, in this work, we propose an interactive evaluation framework for TOD. We first build a goal-oriented user simulator based on pre-trained models and then use the user simulator to interact with the dialogue system to generate dialogues.Besides, we introduce a sentence-level and a session-level score to measure the sentence fluency and session coherence in the interactive evaluation. 
Experimental results show that RL-based TOD systems trained by our proposed user simulator can achieve nearly 98% inform and success rates in the interactive evaluation of MultiWOZ dataset and the proposed scores measure the response quality besides the inform and success rates.We are hoping that our work will encourage simulator-based interactive evaluations in the TOD task.", + "author": "Qinyuan Cheng; Linyang Li; Guofeng Quan; Feng Gao; Xiaofeng Mou; Xipeng Qiu", + "authorids": "/q/qinyuan-cheng/; /l/linyang-li/; /g/guofeng-quan/; /f/feng-gao/; /x/xiaofeng-mou/; /x/xipeng-qiu/", + "bibtex": "@inproceedings{cheng-etal-2022-multiwoz,\n title = \"Is {M}ulti{WOZ} a Solved Task? An Interactive {TOD} Evaluation Framework with User Simulator\",\n author = \"Cheng, Qinyuan and\n Li, Linyang and\n Quan, Guofeng and\n Gao, Feng and\n Mou, Xiaofeng and\n Qiu, Xipeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.90/\",\n doi = \"10.18653/v1/2022.findings-emnlp.90\",\n pages = \"1248--1259\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.90.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.90/", + "pdf_size": 423373, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=501342936722789307&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science, Fudan University; School of Computer Science, Fudan University; School of Computer Science, Fudan University; AI Innovation Center, Midea Group Co Ltd; AI Innovation Center, Midea Group Co Ltd; School of Computer Science, Fudan University", + "aff_domain": 
"m.fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;midea.com;midea.com;fudan.edu.cn", + "email": "m.fudan.edu.cn;fudan.edu.cn;m.fudan.edu.cn;midea.com;midea.com;fudan.edu.cn", + "github": "https://github.com/xiami2019/User-Simulator", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;1;0", + "aff_unique_norm": "Fudan University;Midea Group Co Ltd", + "aff_unique_dep": "School of Computer Science;AI Innovation Center", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.mideaglobal.com", + "aff_unique_abbr": "Fudan;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.202", + "title": "Is NLP Ready for Standardization?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While standardization is a well-established activity in other scientific fields such as telecommunications, networks or multimedia, in the field of AI and more specifically NLP it is still at its dawn. In this paper, we explore how various aspects of NLP (evaluation, data, tasks...) lack standards and how that can impact science, but also the society, the industry, and regulations. We argue that the numerous initiatives to rationalize the field and establish good practices are only the first step, and developing formal standards remains needed to bring further clarity to NLP research and industry, at a time where this community faces various crises regarding ethics or reproducibility. 
We thus encourage NLP researchers to contribute to existing and upcoming standardization projects, so that they can express their needs and concerns, while sharing their expertise.", + "author": "Lauriane Aufrant", + "authorids": "/l/lauriane-aufrant/", + "bibtex": "@inproceedings{aufrant-2022-nlp,\n title = \"Is {NLP} Ready for Standardization?\",\n author = \"Aufrant, Lauriane\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.202/\",\n doi = \"10.18653/v1/2022.findings-emnlp.202\",\n pages = \"2785--2800\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.202.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.202/", + "pdf_size": 194568, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10256436657298160380&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Inria, Rocquencourt, France", + "aff_domain": "inria.fr", + "email": "inria.fr", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Inria", + "aff_unique_dep": "", + "aff_unique_url": "https://www.inria.fr", + "aff_unique_abbr": "Inria", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Rocquencourt", + "aff_country_unique_index": "0", + "aff_country_unique": "France" + }, + { + "id": "2022.emnlp-main.302", + "title": "Is a Question Decomposition Unit All We Need?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large Language Models (LMs) have achieved state-of-the-art performance on many Natural Language Processing (NLP) benchmarks. With the growing number of new benchmarks, we build bigger and more complex LMs. 
However, building new LMs may not be an ideal option owing to the cost, time and environmental impact associated with it. We explore an alternative route: can we modify data by expressing it in terms of the model\u2019s strengths, so that a question becomes easier for models to answer? We investigate if humans can decompose a hard question into a set of simpler questions that are relatively easier for models to solve. We analyze a range of datasets involving various forms of reasoning and find that it is indeed possible to significantly improve model performance (24% for GPT3 and 29% for RoBERTa-SQuAD along with a symbolic calculator) via decomposition. Our approach provides a viable option to involve people in NLP research in a meaningful way. Our findings indicate that Human-in-the-loop Question Decomposition (HQD) can potentially provide an alternate path to building large LMs.", + "author": "Pruthvi Patel; Swaroop Mishra; Mihir Parmar; Chitta Baral", + "authorids": "/p/pruthvi-patel/; /s/swaroop-mishra/; /m/mihir-parmar/; /c/chitta-baral/", + "bibtex": "@inproceedings{patel-etal-2022-question,\n title = \"Is a Question Decomposition Unit All We Need?\",\n author = \"Patel, Pruthvi and\n Mishra, Swaroop and\n Parmar, Mihir and\n Baral, Chitta\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.302/\",\n doi = \"10.18653/v1/2022.emnlp-main.302\",\n pages = \"4553--4569\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.302.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.302/", + "pdf_size": 444386, + "gs_citation": 47, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=10973632707536735847&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Arizona State University; Arizona State University; Arizona State University; Arizona State University", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/Pruthvi98/QuestionDecomposition", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.314", + "title": "Is anisotropy really the cause of BERT embeddings not being semantic?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper we conduct a set of experiments aimed to improve our understanding of the lack of semantic isometry in BERT, i.e. the lack of correspondence between the embedding and meaning spaces of its contextualized word representations. Our empirical results show that, contrary to popular belief, the anisotropy is not the root cause of the poor performance of these contextual models\u2019 embeddings in semantic tasks. What does affect both the anisotropy and semantic isometry is a set of known biases: frequency, subword, punctuation, and case. 
For each one of them, we measure its magnitude and the effect of its removal, showing that these biases contribute but do not completely explain the phenomenon of anisotropy and lack of semantic isometry of these contextual language models.", + "author": "Alejandro Fuster Baggetto; Victor Fresno", + "authorids": "/a/alejandro-fuster-baggetto/; /v/victor-fresno/", + "bibtex": "@inproceedings{fuster-baggetto-fresno-2022-anisotropy,\n title = \"Is anisotropy really the cause of {BERT} embeddings not being semantic?\",\n author = \"Fuster Baggetto, Alejandro and\n Fresno, Victor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.314/\",\n doi = \"10.18653/v1/2022.findings-emnlp.314\",\n pages = \"4271--4281\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.314.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.314/", + "pdf_size": 567218, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6190936080182842073&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-industry.27", + "title": "Is it out yet? Automatic Future Product Releases Extraction from Web Data", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Identifying the release of new products and their predicted demand in advance is highly valuable for E-Commerce marketplaces and retailers. The information of an upcoming product release is used for inventory management, marketing campaigns and pre-order suggestions. 
Often, the announcement of an upcoming product release is widely available in multiple web pages such as blogs, chats or news articles. However, to the best of our knowledge, an automatic system to extract future product releases from web data has not been presented. In this work we describe an ML-powered multi-stage pipeline to automatically identify future product releases and rank their predicted demand from unstructured pages across the whole web. Our pipeline includes a novel Longformer-based model which uses a global attention mechanism guided by pre-calculated Named Entity Recognition predictions related to product releases. The model training data is based on a new corpus of 30K web pages manually annotated to identify future product releases. We made the dataset openly available at https://doi.org/10.5281/zenodo.6894770.", + "author": "Gilad Fuchs; Ido Ben-shaul; Matan Mandelbrod", + "authorids": "/g/gilad-fuchs/; /i/ido-ben-shaul/; /m/matan-mandelbrod/", + "bibtex": "@inproceedings{fuchs-etal-2022-yet,\n title = \"Is it out yet? 
Automatic Future Product Releases Extraction from Web Data\",\n author = \"Fuchs, Gilad and\n Ben-shaul, Ido and\n Mandelbrod, Matan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.27/\",\n doi = \"10.18653/v1/2022.emnlp-industry.27\",\n pages = \"263--271\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.27.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.27/", + "pdf_size": 263526, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:xPYZPLdDFDkJ:scholar.google.com/&scioq=Is+it+out+yet%3F+Automatic+Future+Product+Releases+Extraction+from+Web+Data&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff": "eBay Research / Israel; eBay Research / Israel; eBay Research / Israel", + "aff_domain": "ebay.com;ebay.com;ebay.com", + "email": "ebay.com;ebay.com;ebay.com", + "github": "https://github.com/google-research/text-to-text-transfer-transformerdataset-preparation", + "project": "https://doi.org/10.5281/zenodo.6894770", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "eBay Research", + "aff_unique_dep": "Research", + "aff_unique_url": "https://research.ebay.com", + "aff_unique_abbr": "eBay", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.535", + "title": "Is the Brain Mechanism for Hierarchical Structure Building Universal Across Languages? 
An fMRI Study of Chinese and English", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Evidence from psycholinguistic studies suggests that the human brain builds a hierarchical syntactic structure during language comprehension. However, it is still unknown whether the neural basis of such structures is universal across languages. In this paper, we first analyze the differences in language structure between two diverse languages: Chinese and English. By computing the working memory requirements when applying parsing strategies to different language structures, we find that top-down parsing generates less memory load for the right-branching English and bottom-up parsing is less memory-demanding for Chinese.Then we use functional magnetic resonance imaging (fMRI) to investigate whether the brain has different syntactic adaptation strategies in processing Chinese and English. Specifically, for both Chinese and English, we extract predictors from the implementations of different parsing strategies, i.e., bottom-up and top-down. Then, these predictors are separately associated with fMRI signals. Results show that for Chinese and English, the brain utilizes bottom-up and top-down parsing strategies separately. These results reveal that the brain adopts parsing strategies with less memory processing load according to different language structures.", + "author": "Xiaohan Zhang; Shaonan Wang; Nan Lin; Chengqing Zong", + "authorids": "/x/xiaohan-zhang/; /s/shaonan-wang/; /n/nan-lin/; /c/chengqing-zong/", + "bibtex": "@inproceedings{zhang-etal-2022-brain,\n title = \"Is the Brain Mechanism for Hierarchical Structure Building Universal Across Languages? 
An f{MRI} Study of {C}hinese and {E}nglish\",\n author = \"Zhang, Xiaohan and\n Wang, Shaonan and\n Lin, Nan and\n Zong, Chengqing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.535/\",\n doi = \"10.18653/v1/2022.emnlp-main.535\",\n pages = \"7852--7861\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.535.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.535/", + "pdf_size": 1391801, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15070944429467922417&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "National Laboratory of Pattern Recognition, Institute of Automation, CAS+School of Artificial Intelligence, University of Chinese Academy of Sciences; National Laboratory of Pattern Recognition, Institute of Automation, CAS+School of Artificial Intelligence, University of Chinese Academy of Sciences; CAS Key Laboratory of Behavioural Sciences, Institute of Psychology+Department of Psychology, University of Chinese Academy of Sciences; National Laboratory of Pattern Recognition, Institute of Automation, CAS+School of Artificial Intelligence, University of Chinese Academy of Sciences+CAS Center for Excellence in Brain Science and Intelligence Technology", + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;psych.ac.cn;nlpr.ia.ac.cn", + "email": "nlpr.ia.ac.cn;nlpr.ia.ac.cn;psych.ac.cn;nlpr.ia.ac.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;0+1+0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": 
"http://www.ia.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.404", + "title": "IsoVec: Controlling the Relative Isomorphism of Word Embedding Spaces", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The ability to extract high-quality translation dictionaries from monolingual word embedding spaces depends critically on the geometric similarity of the spaces\u2014their degree of \u201cisomorphism.\u201d We address the root-cause of faulty cross-lingual mapping: that word embedding training resulted in the underlying spaces being non-isomorphic. We incorporate global measures of isomorphism directly into the skipgram loss function, successfully increasing the relative isomorphism of trained word embedding spaces and improving their ability to be mapped to a shared cross-lingual space. The result is improved bilingual lexicon induction in general data conditions, under domain mismatch, and with training algorithm dissimilarities. 
We release IsoVec at https://github.com/kellymarchisio/isovec.", + "author": "Kelly Marchisio; Neha Verma; Kevin Duh; Philipp Koehn", + "authorids": "/k/kelly-marchisio/; /n/neha-verma/; /k/kevin-duh/; /p/philipp-koehn/", + "bibtex": "@inproceedings{marchisio-etal-2022-isovec,\n title = \"{I}so{V}ec: Controlling the Relative Isomorphism of Word Embedding Spaces\",\n author = \"Marchisio, Kelly and\n Verma, Neha and\n Duh, Kevin and\n Koehn, Philipp\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.404/\",\n doi = \"10.18653/v1/2022.emnlp-main.404\",\n pages = \"6019--6033\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.404.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.404/", + "pdf_size": 462430, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7204920352545974302&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Johns Hopkins University; Johns Hopkins University; Johns Hopkins University; Johns Hopkins University", + "aff_domain": "jhu.edu;jhu.edu;cs.jhu.edu;jhu.edu", + "email": "jhu.edu;jhu.edu;cs.jhu.edu;jhu.edu", + "github": "https://github.com/kellymarchisio/isovec", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.20", + "title": "Iterative Stratified Testing and Measurement for Automated Model Updates", + "track": "main", + 
"status": "Industry", + "award": false, + "abstract": "Automating updates to machine learning systems is an important but understudied challenge in AutoML. The high model variance of many cutting-edge deep learning architectures means that retraining a model provides no guarantee of accurate inference on all sample types. To address this concern, we present Automated Data-Shape Stratified Model Updates (ADSMU), a novel framework that relies on iterative model building coupled with data-shape stratified model testing and improvement. Using ADSMU, we observed a 26% (relative) improvement in accuracy for new model use cases on a large-scale NLU system, compared to a naive (manually) retrained baseline and current cutting-edge methods.", + "author": "Elizabeth Dekeyser; Nicholas Comment; Shermin Pei; Rajat Kumar; Shruti Rai; Fengtao Wu; Lisa Haverty; Kanna Shimizu", + "authorids": "/e/elizabeth-dekeyser/; /n/nicholas-comment/; /s/shermin-pei/; /r/rajat-kumar/; /s/shruti-rai/; /f/fengtao-wu/; /l/lisa-haverty/; /k/kanna-shimizu/", + "bibtex": "@inproceedings{dekeyser-etal-2022-iterative,\n title = \"Iterative Stratified Testing and Measurement for Automated Model Updates\",\n author = \"Dekeyser, Elizabeth and\n Comment, Nicholas and\n Pei, Shermin and\n Kumar, Rajat and\n Rai, Shruti and\n Wu, Fengtao and\n Haverty, Lisa and\n Shimizu, Kanna\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.20/\",\n doi = \"10.18653/v1/2022.emnlp-industry.20\",\n pages = \"198--205\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.20.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.20/", + "pdf_size": 436404, + "gs_citation": 0, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?q=related:RzoS2Noe5KgJ:scholar.google.com/&scioq=Iterative+Stratified+Testing+and+Measurement+for+Automated+Model+Updates&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff": "Amazon Alexa; Amazon Alexa; Amazon Alexa; Amazon Alexa; Amazon Alexa; Amazon Alexa; Amazon Alexa; Amazon Alexa", + "aff_domain": "amazon.com; ; ;amazon.com; ; ; ; ", + "email": "amazon.com; ; ;amazon.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Amazon Alexa", + "aff_unique_url": "https://www.amazon.com/alexa", + "aff_unique_abbr": "Amazon Alexa", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.174", + "title": "Iteratively Prompt Pre-trained Language Models for Chain of Thought", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step reasoning. Similar to how humans develop a \u201cchain of thought\u201d for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step\u2019s contexts. 
Experiments on three datasets involving multi-step reasoning show the effectiveness of the iterative scheme and the context-aware prompter design.", + "author": "Boshi Wang; Xiang Deng; Huan Sun", + "authorids": "/b/boshi-wang/; /x/xiang-deng/; /h/huan-sun/", + "bibtex": "@inproceedings{wang-etal-2022-iteratively,\n title = \"Iteratively Prompt Pre-trained Language Models for Chain of Thought\",\n author = \"Wang, Boshi and\n Deng, Xiang and\n Sun, Huan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.174/\",\n doi = \"10.18653/v1/2022.emnlp-main.174\",\n pages = \"2714--2730\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.174.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.174/", + "pdf_size": 1451929, + "gs_citation": 122, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6514490978435519479&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": "The Ohio State University, Columbus, OH; The Ohio State University, Columbus, OH; The Ohio State University, Columbus, OH", + "aff_domain": "osu.edu;osu.edu;osu.edu", + "email": "osu.edu;osu.edu;osu.edu", + "github": "https://github.com/sunlab-osu/IterPrompt", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The Ohio State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.osu.edu", + "aff_unique_abbr": "OSU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Columbus", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.46", + "title": "It\u2019s Better to Teach Fishing than Giving a Fish: An Auto-Augmented 
Structure-aware Generative Model for Metaphor Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Metaphor Detection aims to identify the metaphorical meaning of words in the sentence. Most existing work is discriminant models, which use the contextual semantic information extracted by transformers for classifications directly. Due to insufficient training data and corresponding paraphrases, recent methods focus on how to get external resources and utilize them to introduce more knowledge. Currently, contextual modeling and external data are two key issues in the field. In this paper, we propose **A**n **A**uto-**A**ugmented **S**tructure-aware generative model (**AAAS**) for metaphor detection, which transforms the classification task into a keywords-extraction task. Specifically, we propose the task of structure information extraction to allow the model to use the \u2018structural language\u2019 to describe the whole sentence. Furthermore, without any other external resources, we design a simple but effective auto-augmented method to expand the limited datasets. 
Experimental results show that **AAAS** obtains competitive results compared with state-of-the-art methods.", + "author": "Huawen Feng; Qianli Ma", + "authorids": "/h/huawen-feng/; /q/qianli-ma/", + "bibtex": "@inproceedings{feng-ma-2022-better,\n title = \"It`s Better to Teach Fishing than Giving a Fish: An Auto-Augmented Structure-aware Generative Model for Metaphor Detection\",\n author = \"Feng, Huawen and\n Ma, Qianli\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.46/\",\n doi = \"10.18653/v1/2022.findings-emnlp.46\",\n pages = \"656--667\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.46.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.46/", + "pdf_size": 3549563, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16745161696731553685&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China", + "aff_domain": "qq.com;scut.edu.cn", + "email": "qq.com;scut.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Guangzhou", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.550", + "title": "JANUS: Joint Autoregressive and Non-autoregressive 
Training with Auxiliary Loss for Sequence Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based autoregressive and non-autoregressive models have played an essential role in sequence generation tasks. The autoregressive model can obtain excellent performance, while the non-autoregressive model brings fast decoding speed for inference. In this paper, we propose JANUS, a Joint Autoregressive and Non-autoregressive training method using aUxiliary losS to enhance the model performance in both AR and NAR manner simultaneously and effectively alleviate the problem of distribution discrepancy.Further, we pre-train BART with JANUS on a large corpus with minimal cost (16 GPU days) and make the BART-JANUS capable of non-autoregressive generation, demonstrating that our approach can transfer the AR knowledge to NAR. Empirically, we show our approach and BART-JANUS can achieve significant improvement on multiple generation tasks, including machine translation and GLGE benchmarks. 
Our code is available at Github.", + "author": "Xiaobo Liang; Lijun Wu; Juntao Li; Min Zhang", + "authorids": "/x/xiaobo-liang/; /l/lijun-wu/; /j/juntao-li/; /m/min-zhang/", + "bibtex": "@inproceedings{liang-etal-2022-janus,\n title = \"{JANUS}: Joint Autoregressive and Non-autoregressive Training with Auxiliary Loss for Sequence Generation\",\n author = \"Liang, Xiaobo and\n Wu, Lijun and\n Li, Juntao and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.550/\",\n doi = \"10.18653/v1/2022.emnlp-main.550\",\n pages = \"8050--8060\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.550.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.550/", + "pdf_size": 892144, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17712986204212343385&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Soochow University; Soochow University; Microsoft Research; Soochow University", + "aff_domain": "stu.suda.edu.cn;stu.suda.edu.cn;microsoft.com;suda.edu.cn", + "email": "stu.suda.edu.cn;stu.suda.edu.cn;microsoft.com;suda.edu.cn", + "github": "https://github.com/dropreg/JANUS", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Soochow University;Microsoft Corporation", + "aff_unique_dep": ";Microsoft Research", + "aff_unique_url": "https://www.soochow.edu.cn;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "Soochow U;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.825", + "title": "JDDC 2.1: A 
Multimodal Chinese Dialogue Dataset with Joint Tasks of Query Rewriting, Response Generation, Discourse Parsing, and Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The popularity of multimodal dialogue has stimulated the need for a new generation of dialogue agents with multimodal interactivity.When users communicate with customer service, they may express their requirements by means of text, images, or even videos. Visual information usually acts as discriminators for product models, or indicators of product failures, which play an important role in the E-commerce scenario.On the other hand, detailed information provided by the images is limited, and typically, customer service systems cannot understand the intent of users without the input text.Thus, bridging the gap between the image and text is crucial for communicating with customers.In this paper, we construct JDDC 2.1, a large-scale multimodal multi-turn dialogue dataset collected from a mainstream Chinese E-commerce platform, containing about 246K dialogue sessions, 3M utterances, and 507K images, along with product knowledge bases and image category annotations. Over our dataset, we jointly define four tasks: the multimodal dialogue response generation task,the multimodal query rewriting task, the multimodal dialogue discourse parsing task, and the multimodal dialogue summarization task.JDDC 2.1 is the first corpus with annotations for all the above tasks over the same dialogue sessions, which facilitates the comprehensive research around the dialogue.In addition, we present several text-only and multimodal baselines and show the importance of visual information for these tasks. 
Our dataset and implements will be publicly available.", + "author": "Nan Zhao; Haoran Li; Youzheng Wu; Xiaodong He", + "authorids": "/n/nan-zhao/; /h/haoran-li/; /y/youzheng-wu/; /x/xiaodong-he/", + "bibtex": "@inproceedings{zhao-etal-2022-jddc,\n title = \"{JDDC} 2.1: A Multimodal {C}hinese Dialogue Dataset with Joint Tasks of Query Rewriting, Response Generation, Discourse Parsing, and Summarization\",\n author = \"Zhao, Nan and\n Li, Haoran and\n Wu, Youzheng and\n He, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.825/\",\n doi = \"10.18653/v1/2022.emnlp-main.825\",\n pages = \"12037--12051\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.825.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.825/", + "pdf_size": 8622737, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11222071729312678088&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "JD AI Research; JD AI Research; JD AI Research; JD AI Research", + "aff_domain": "jd.com;jd.com; ; ", + "email": "jd.com;jd.com; ; ", + "github": "", + "project": "https://JD.com", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "JD AI Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jd.com", + "aff_unique_abbr": "JD AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.389", + "title": "JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset", + "track": "main", + "status": "finding", + "award": false, + "abstract": "JamPatoisNLI provides the 
first dataset for natural language inference in a creole language, Jamaican Patois.Many of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from a major world language and a distinctive grammar reflecting the languages of the original speakers and the process of language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer from large monolingual or multilingual pretrained models. While our work, along with previous work, shows that transfer from these models to low-resource languages that are unrelated to languages in their training set is not very effective, we would expect stronger results from transfer to creoles. Indeed, our experiments show considerably better results from few-shot learning of JamPatoisNLI than for such unrelated languages, and help us begin to understand how the unique relationship between creoles and their high-resource base languages affect cross-lingual transfer. 
JamPatoisNLI, which consists of naturally-occurring premises and expert-written hypotheses, is a step towards steering research into a traditionally underserved language and a useful benchmark for understanding cross-lingual NLP.", + "author": "Ruth-Ann Armstrong; John Hewitt; Christopher Manning", + "authorids": "/r/ruth-ann-armstrong/; /j/john-hewitt/; /c/christopher-d-manning/", + "bibtex": "@inproceedings{armstrong-etal-2022-jampatoisnli,\n title = \"{J}am{P}atois{NLI}: A Jamaican Patois Natural Language Inference Dataset\",\n author = \"Armstrong, Ruth-Ann and\n Hewitt, John and\n Manning, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.389/\",\n doi = \"10.18653/v1/2022.findings-emnlp.389\",\n pages = \"5307--5320\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.389.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.389/", + "pdf_size": 318942, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17748500563967326622&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", + "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": 
"Stanford", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.419", + "title": "Joint Audio/Text Training for Transformer Rescorer of Streaming Speech Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, there has been an increasing interest in two-pass streaming end-to-end speech recognition (ASR) that incorporates a 2nd-pass rescoring model on top of the conventional 1st-pass streaming ASR model to improve recognition accuracy while keeping latency low. One of the latest 2nd-pass rescoring model, Transformer Rescorer, takes the n-best initial outputs and audio embeddings from the 1st-pass model, and then choose the best output by re-scoring the n-best initial outputs. However, training this Transformer Rescorer requires expensive paired audio-text training data because the model uses audio embeddings as input. In this work, we present our Joint Audio/Text training method for Transformer Rescorer, to leverage unpaired text-only data which is relatively cheaper than paired audio-text data. 
We evaluate Transformer Rescorer with our Joint Audio/Text training on Librispeech dataset as well as our large-scale in-house dataset and show that our training method can improve word error rate (WER) significantly compared to standard Transformer Rescorer without requiring any extra model parameters or latency.", + "author": "Suyoun Kim; Ke Li; Lucas Kabela; Ron Huang; Jiedan Zhu; Ozlem Kalinli; Duc Le", + "authorids": "/s/suyoun-kim/; /k/ke-li/; /l/lucas-kabela/; /r/ron-huang/; /j/jiedan-zhu/; /o/ozlem-kalinli/; /d/duc-le/", + "bibtex": "@inproceedings{kim-etal-2022-joint,\n title = \"Joint Audio/Text Training for Transformer Rescorer of Streaming Speech Recognition\",\n author = \"Kim, Suyoun and\n Li, Ke and\n Kabela, Lucas and\n Huang, Ron and\n Zhu, Jiedan and\n Kalinli, Ozlem and\n Le, Duc\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.419/\",\n doi = \"10.18653/v1/2022.findings-emnlp.419\",\n pages = \"5717--5722\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.419.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.419/", + "pdf_size": 469491, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8095599060354496011&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Meta, USA; Meta, USA; Meta, USA; Meta, USA; Meta, USA; Meta, USA; Meta, USA", + "aff_domain": "meta.com; ; ; ; ; ; ", + "email": "meta.com; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.817", + "title": "Joint Completion and Alignment of Multilingual Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge Graph Completion (KGC) predicts missing facts in an incomplete Knowledge Graph (KG). Multilingual KGs associate entities and relations with surface forms written in different languages. An entity or relation may be associated with distinct IDs in different KGs, necessitating entity alignment (EA) and relation alignment (RA). Many effective algorithms have been proposed for completion and alignment as separate tasks. Here we show that these tasks are synergistic and best solved together. Our multitask approach starts with a state-of-the-art KG embedding scheme, but adds a novel relation representation based on sets of embeddings of (subject, object) entity pairs. This representation leads to a new relation alignment loss term based on a maximal bipartite matching between two sets of embedding vectors. This loss is combined with traditional KGC loss and optionally, losses based on text embeddings of entity (and relation) names. In experiments over KGs in seven languages, we find that our system achieves large improvements in KGC compared to a strong completion model that combines known facts in all languages. 
It also outperforms strong EA and RA baselines, underscoring the value of joint alignment and completion.", + "author": "Soumen Chakrabarti; Harkanwar Singh; Shubham Lohiya; Prachi Jain; Mausam -", + "authorids": "/s/soumen-chakrabarti/; /h/harkanwar-singh/; /s/shubham-lohiya/; /p/prachi-jain/; /m/mausam/", + "bibtex": "@inproceedings{chakrabarti-etal-2022-joint,\n title = \"Joint Completion and Alignment of Multilingual Knowledge Graphs\",\n author = \"Chakrabarti, Soumen and\n Singh, Harkanwar and\n Lohiya, Shubham and\n Jain, Prachi and\n -, Mausam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.817/\",\n doi = \"10.18653/v1/2022.emnlp-main.817\",\n pages = \"11922--11938\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.817.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.817/", + "pdf_size": 549392, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16837865423003045311&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "IIT Bombay; IIT Delhi; IIT Bombay; IIT Delhi; IIT Delhi", + "aff_domain": "cse.iitb.ac.in; ; ;cse.iitd.ac.in;cse.iitd.ac.in", + "email": "cse.iitb.ac.in; ; ;cse.iitd.ac.in;cse.iitd.ac.in", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;1;1", + "aff_unique_norm": "Indian Institute of Technology Bombay;Indian Institute of Technology Delhi", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitb.ac.in;https://www.iitd.ac.in", + "aff_unique_abbr": "IITB;IITD", + "aff_campus_unique_index": "0;1;0;1;1", + "aff_campus_unique": "Mumbai;Delhi", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "India" + }, + 
{ + "id": "2022.findings-emnlp.341", + "title": "Joint Multilingual Knowledge Graph Completion and Alignment", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge graph (KG) alignment and completion are usually treated as two independent tasks. While recent work has leveraged entity and relation alignments from multiple KGs, such as alignments between multilingual KGs with common entities and relations, a deeper understanding of the ways in which multilingual KG completion (MKGC) can aid the creation of multilingual KG alignments (MKGA) is still limited. Motivated by the observation that structural inconsistencies \u2013 the main challenge for MKGA models \u2013 can be mitigated through KG completion methods, we propose a novel model for jointly completing and aligning knowledge graphs. The proposed model combines two components that jointly accomplish KG completion and alignment. These two components employ relation-aware graph neural networks that we propose to encode multi-hop neighborhood structures into entity and relation representations. Moreover, we also propose (i) a structural inconsistency reduction mechanism to incorporate information from the completion into the alignment component, and (ii) an alignment seed enlargement and triple transferring mechanism to enlarge alignment seeds and transfer triples during KGs alignment. 
Extensive experiments on a public multilingual benchmark show that our proposed model outperforms existing competitive baselines, obtaining new state-of-the-art results on both MKGC and MKGA tasks.", + "author": "Vinh Tong; Dat Quoc Nguyen; Trung Thanh Huynh; Tam Thanh Nguyen; Quoc Viet Hung Nguyen; Mathias Niepert", + "authorids": "/v/vinh-tong/; /d/dat-quoc-nguyen/; /t/trung-thanh-huynh/; /t/tam-thanh-nguyen/; /q/quoc-viet-hung-nguyen/; /m/mathias-niepert/", + "bibtex": "@inproceedings{tong-etal-2022-joint,\n title = \"Joint Multilingual Knowledge Graph Completion and Alignment\",\n author = \"Tong, Vinh and\n Nguyen, Dat Quoc and\n Huynh, Trung Thanh and\n Nguyen, Tam Thanh and\n Nguyen, Quoc Viet Hung and\n Niepert, Mathias\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.341/\",\n doi = \"10.18653/v1/2022.findings-emnlp.341\",\n pages = \"4646--4658\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.341.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.341/", + "pdf_size": 538740, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6523689294741360923&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Stuttgart, Germany; VinAI Research, Vietnam; EPFL, Switzerland; Griffith University, Australia; Griffith University, Australia; University of Stuttgart, Germany", + "aff_domain": "ipvs.uni-stuttgart.de;vinai.io; ; ; ; ", + "email": "ipvs.uni-stuttgart.de;vinai.io; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;3;0", + "aff_unique_norm": "University of Stuttgart;VinAI Research;\u00c9cole Polytechnique F\u00e9d\u00e9rale de 
Lausanne;Griffith University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uni-stuttgart.de;https://www.vin.ai;https://www.epfl.ch;https://www.griffith.edu.au", + "aff_unique_abbr": "USTuttgart;VinAI;EPFL;Griffith", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;3;3;0", + "aff_country_unique": "Germany;Vietnam;Switzerland;Australia" + }, + { + "id": "2022.emnlp-main.425", + "title": "Just Fine-tune Twice: Selective Differential Privacy for Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Protecting large language models from privacy leakage is becoming increasingly crucial with their wide adoption in real-world products. Yet applying *differential privacy* (DP), a canonical notion with provable privacy guarantees for machine learning models, to those models remains challenging due to the trade-off between model utility and privacy loss. Utilizing the fact that sensitive information in language data tends to be sparse, Shi et al. (2021) formalized a DP notion extension called *Selective Differential Privacy* (SDP) to protect only the sensitive tokens defined by a policy function. However, their algorithm only works for RNN-based models. In this paper, we develop a novel framework, *Just Fine-tune Twice* (JFT), that achieves SDP for state-of-the-art large transformer-based models. Our method is easy to implement: it first fine-tunes the model with *redacted* in-domain data, and then fine-tunes it again with the *original* in-domain data using a private training mechanism. Furthermore, we study the scenario of imperfect implementation of policy functions that misses sensitive tokens and develop systematic methods to handle it. Experiments show that our method achieves strong utility compared to previous baselines. 
We also analyze the SDP privacy guarantee empirically with the canary insertion attack.", + "author": "Weiyan Shi; Ryan Shea; Si Chen; Chiyuan Zhang; Ruoxi Jia; Zhou Yu", + "authorids": "/w/weiyan-shi/; /r/ryan-shea/; /s/si-chen/; /c/chiyuan-zhang/; /r/ruoxi-jia/; /z/zhou-yu/", + "bibtex": "@inproceedings{shi-etal-2022-just,\n title = \"Just Fine-tune Twice: Selective Differential Privacy for Large Language Models\",\n author = \"Shi, Weiyan and\n Shea, Ryan and\n Chen, Si and\n Zhang, Chiyuan and\n Jia, Ruoxi and\n Yu, Zhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.425/\",\n doi = \"10.18653/v1/2022.emnlp-main.425\",\n pages = \"6327--6340\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.425.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.425/", + "pdf_size": 1187078, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1627537263818093545&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Columbia University\u2020; Columbia University\u2020; Virginia Tech\u2021; Google Research\u22c4; Virginia Tech\u2021; Columbia University\u2020", + "aff_domain": "columbia.edu;columbia.edu;vt.edu;google.com;vt.edu;columbia.edu", + "email": "columbia.edu;columbia.edu;vt.edu;google.com;vt.edu;columbia.edu", + "github": "https://github.com/wyshi/sdp_transformers", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;1;0", + "aff_unique_norm": "Columbia University;Virginia Tech;Google", + "aff_unique_dep": ";;Google Research", + "aff_unique_url": "https://www.columbia.edu;https://www.vt.edu;https://research.google", + "aff_unique_abbr": "Columbia;VT;Google Research", + 
"aff_campus_unique_index": "1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.6", + "title": "KE-GCL: Knowledge Enhanced Graph Contrastive Learning for Commonsense Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Commonsense question answering (CQA) aims to choose the correct answers for commonsense questions. Most existing works focus on extracting and reasoning over external knowledge graphs (KG). However, the noise in KG prevents these models from learning effective representations. In this paper, we propose a Knowledge Enhanced Graph Contrastive Learning model (KE-GCL) by incorporating the contextual descriptions of entities and adopting a graph contrastive learning scheme. Specifically, for QA pairs we represent the knowledge from KG and contextual descriptions. Then, the representations of contextual descriptions as context nodes are inserted into KG, forming the knowledge-enhanced graphs.Moreover, we design a contrastive learning method on graphs. For knowledge-enhanced graphs, we build their augmented views with an adaptive sampling strategy. After that, we reason over graphs to update their representations by scattering edges and aggregating nodes. To further improve GCL, hard graph negatives are chosen based on incorrect answers. 
Extensive experiments on two benchmark datasets demonstrate the effectiveness of our proposed KE-GCL, which outperforms previous methods consistently.", + "author": "Lihui Zhang; Ruifan Li", + "authorids": "/l/lihui-zhang/; /r/ruifan-li/", + "bibtex": "@inproceedings{zhang-li-2022-ke,\n title = \"{KE}-{GCL}: Knowledge Enhanced Graph Contrastive Learning for Commonsense Question Answering\",\n author = \"Zhang, Lihui and\n Li, Ruifan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.6/\",\n doi = \"10.18653/v1/2022.findings-emnlp.6\",\n pages = \"76--87\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.6.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.6/", + "pdf_size": 956285, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11524634325328702243&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China+Engineering Research Center of Information Networks, Ministry of Education, China; School of Artificial Intelligence, Beijing University of Posts and Telecommunications, China+Engineering Research Center of Information Networks, Ministry of Education, China", + "aff_domain": "bupt.edu.cn;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn", + "github": "https://github.com/hlhqbzd/KE-GCL", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Engineering Research Center of Information Networks", + "aff_unique_dep": "School of Artificial Intelligence;Ministry of Education", + "aff_unique_url": 
"http://www.bupt.edu.cn/;", + "aff_unique_abbr": "BUPT;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.206", + "title": "KECP: Knowledge Enhanced Contrastive Prompting for Few-shot Extractive Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Extractive Question Answering (EQA) is one of the most essential tasks in Machine Reading Comprehension (MRC), which can be solved by fine-tuning the span selecting heads of Pre-trained Language Models (PLMs). However, most existing approaches for MRC may perform poorly in the few-shot learning scenario. To solve this issue, we propose a novel framework named Knowledge Enhanced Contrastive Prompt-tuning (KECP). Instead of adding pointer heads to PLMs, we introduce a seminal paradigm for EQA that transforms the task into a non-autoregressive Masked Language Modeling (MLM) generation problem. Simultaneously, rich semantics from the external knowledge base (KB) and the passage context support enhancing the query\u2019s representations. In addition, to boost the performance of PLMs, we jointly train the model by the MLM and contrastive learning objectives. 
Experiments on multiple benchmarks demonstrate that our method consistently outperforms state-of-the-art approaches in few-shot settings by a large margin.", + "author": "Jianing Wang; Chengyu Wang; Minghui Qiu; Qiuhui Shi; Hongbin Wang; Jun Huang; Ming Gao", + "authorids": "/j/jianing-wang/; /c/chengyu-wang/; /m/minghui-qiu/; /q/qiuhui-shi/; /h/hongbin-wang/; /j/jun-huang/; /m/ming-gao/", + "bibtex": "@inproceedings{wang-etal-2022-kecp,\n title = \"{KECP}: Knowledge Enhanced Contrastive Prompting for Few-shot Extractive Question Answering\",\n author = \"Wang, Jianing and\n Wang, Chengyu and\n Qiu, Minghui and\n Shi, Qiuhui and\n Wang, Hongbin and\n Huang, Jun and\n Gao, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.206/\",\n doi = \"10.18653/v1/2022.emnlp-main.206\",\n pages = \"3152--3163\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.206.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.206/", + "pdf_size": 1015566, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4732149351857177450&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China+KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Alibaba Group, Hangzhou, China; School of Data Science and Engineering, East China Normal University, Shanghai, China+KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China", + "aff_domain": 
"gmail.com;alibaba-inc.com;alibaba-inc.com;antgroup.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn", + "email": "gmail.com;alibaba-inc.com;alibaba-inc.com;antgroup.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn", + "github": "https://github.com/alibaba/EasyNLP", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;1;1;2;2;1;0+0", + "aff_unique_norm": "East China Normal University;Alibaba Group;Ant Group", + "aff_unique_dep": "School of Data Science and Engineering;;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.alibaba.com;https://www.antgroup.com", + "aff_unique_abbr": "ECNU;Alibaba;Ant Group", + "aff_campus_unique_index": "0+0;1;1;1;1;1;0+0", + "aff_campus_unique": "Shanghai;Hangzhou", + "aff_country_unique_index": "0+0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.744", + "title": "KOLD: Korean Offensive Language Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent directions for offensive language detection are hierarchical modeling, identifying the type and the target of offensive language, and interpretability with offensive span annotation and prediction. These improvements are focused on English and do not transfer well to other languages because of cultural and linguistic differences. In this paper, we present the Korean Offensive Language Dataset (KOLD) comprising 40,429 comments, which are annotated hierarchically with the type and the target of offensive language, accompanied by annotations of the corresponding text spans. We collect the comments from NAVER news and YouTube platform and provide the titles of the articles and videos as the context information for the annotation process. 
We use these annotated comments as training data for Korean BERT and RoBERTa models and find that they are effective at offensiveness detection, target classification, and target span detection while having room for improvement for target group classification and offensive span detection. We discover that the target group distribution differs drastically from the existing English datasets, and observe that providing the context information improves the model performance in offensiveness detection (+0.3), target classification (+1.5), and target group classification (+13.1). We publicly release the dataset and baseline models.", + "author": "Younghoon Jeong; Juhyun Oh; Jongwon Lee; Jaimeen Ahn; Jihyung Moon; Sungjoon Park; Alice Oh", + "authorids": "/y/younghoon-jeong/; /j/juhyun-oh/; /j/jongwon-lee/; /j/jaimeen-ahn/; /j/jihyung-moon/; /s/sungjoon-park/; /a/alice-oh/", + "bibtex": "@inproceedings{jeong-etal-2022-kold,\n title = \"{KOLD}: {K}orean Offensive Language Dataset\",\n author = \"Jeong, Younghoon and\n Oh, Juhyun and\n Lee, Jongwon and\n Ahn, Jaimeen and\n Moon, Jihyung and\n Park, Sungjoon and\n Oh, Alice\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.744/\",\n doi = \"10.18653/v1/2022.emnlp-main.744\",\n pages = \"10818--10833\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.744.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.744/", + "pdf_size": 3372364, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5444147925312288363&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "School of Computing, KAIST; School of Computing, KAIST; School of Computing, KAIST; School 
of Computing, KAIST; SoftlyAI Research, SoftlyAI; SoftlyAI Research, SoftlyAI; School of Computing, KAIST", + "aff_domain": "gmail.com;snu.ac.kr;gmail.com;kaist.ac.kr;softly.ai;softly.ai;kaist.edu", + "email": "gmail.com;snu.ac.kr;gmail.com;kaist.ac.kr;softly.ai;softly.ai;kaist.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;1;1;0", + "aff_unique_norm": "KAIST;SoftlyAI", + "aff_unique_dep": "School of Computing;SoftlyAI Research", + "aff_unique_url": "https://www.kaist.ac.kr;", + "aff_unique_abbr": "KAIST;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea;" + }, + { + "id": "2022.findings-emnlp.357", + "title": "KPDROP: Improving Absent Keyphrase Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Keyphrase generation is the task of generating phrases (keyphrases) that summarize the main topics of a given document. Keyphrases can be either present or absent from the given document. While the extraction of present keyphrases has received much attention in the past, only recently a stronger focus has been placed on the generation of absent keyphrases. However, generating absent keyphrases is challenging; even the best methods show only a modest degree of success. In this paper, we propose a model-agnostic approach called keyphrase dropout (or KPDrop) to improve absent keyphrase generation. In this approach, we randomly drop present keyphrases from the document and turn them into artificial absent keyphrases during training. 
We test our approach extensively and show that it consistently improves the absent performance of strong baselines in both supervised and resource-constrained semi-supervised settings.", + "author": "Jishnu Ray Chowdhury; Seo Yeon Park; Tuhin Kundu; Cornelia Caragea", + "authorids": "/j/jishnu-ray-chowdhury/; /s/seo-yeon-park/; /t/tuhin-kundu/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{ray-chowdhury-etal-2022-kpdrop,\n title = \"{KPDROP}: Improving Absent Keyphrase Generation\",\n author = \"Ray Chowdhury, Jishnu and\n Park, Seo Yeon and\n Kundu, Tuhin and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.357/\",\n doi = \"10.18653/v1/2022.findings-emnlp.357\",\n pages = \"4853--4870\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.357.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.357/", + "pdf_size": 318701, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8646382428004963767&as_sdt=5,39&sciodt=0,39&hl=en", + "gs_version_total": 4, + "aff": "Computer Science, University of Illinois at Chicago\u2660; Computer Science, University of Illinois at Chicago\u2660; Amazon\u2663; Computer Science, University of Illinois at Chicago\u2660", + "aff_domain": "uic.edu;uic.edu;outlook.com;uic.edu", + "email": "uic.edu;uic.edu;outlook.com;uic.edu", + "github": "https://github.com/JRC1995/KPDrop", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Illinois at Chicago;Amazon.com, Inc.", + "aff_unique_dep": "Computer Science;", + "aff_unique_url": "https://www.uic.edu;https://www.amazon.com", + "aff_unique_abbr": "UIC;Amazon", + 
"aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.276", + "title": "Keep Me Updated! Memory Management in Long-term Conversations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Remembering important information from the past and continuing to talk about it in the present are crucial in long-term conversations. However, previous literature does not deal with cases where the memorized information is outdated, which may cause confusion in later conversations. To address this issue, we present a novel task and a corresponding dataset of memory management in long-term conversations, in which bots keep track of and bring up the latest information about users while conversing through multiple sessions. In order to support more precise and interpretable memory, we represent memory as unstructured text descriptions of key information and propose a new mechanism of memory management that selectively eliminates invalidated or redundant information. Experimental results show that our approach outperforms the baselines that leave the stored memory unchanged in terms of engagingness and humanness, with larger performance gap especially in the later sessions.", + "author": "Sanghwan Bae; Donghyun Kwak; Soyoung Kang; Min Young Lee; Sungdong Kim; Yuin Jeong; Hyeri Kim; Sang-Woo Lee; Woomyoung Park; Nako Sung", + "authorids": "/s/sanghwan-bae/; /d/donghyun-kwak/; /s/soyoung-kang/; /m/min-young-lee/; /s/sungdong-kim/; /y/yuin-jeong/; /h/hyeri-kim/; /s/sang-woo-lee/; /w/woomyoung-park/; /n/nako-sung/", + "bibtex": "@inproceedings{bae-etal-2022-keep,\n title = \"Keep Me Updated! 
Memory Management in Long-term Conversations\",\n author = \"Bae, Sanghwan and\n Kwak, Donghyun and\n Kang, Soyoung and\n Lee, Min Young and\n Kim, Sungdong and\n Jeong, Yuin and\n Kim, Hyeri and\n Lee, Sang-Woo and\n Park, Woomyoung and\n Sung, Nako\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.276/\",\n doi = \"10.18653/v1/2022.findings-emnlp.276\",\n pages = \"3769--3787\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.276.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.276/", + "pdf_size": 1509107, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8191579360412696210&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10 + }, + { + "id": "2022.emnlp-main.275", + "title": "Kernel-Whitening: Overcome Dataset Bias with Isotropic Sentence Embedding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dataset bias has attracted increasing attention recently for its detrimental effect on the generalization ability of fine-tuned models. The current mainstream solution is designing an additional shallow model to pre-identify biased instances. However, such two-stage methods scale up the computational complexity of training process and obstruct valid feature information while mitigating bias.To address this issue, we utilize the representation normalization method which aims at disentangling the correlations between features of encoded sentences. We find it also promising in eliminating the bias problem by providing isotropic data distribution. 
We further propose Kernel-Whitening, a Nystrom kernel approximation method to achieve more thorough debiasing on nonlinear spurious correlations. Our framework is end-to-end with similar time consumption to fine-tuning. Experiments show that Kernel-Whitening significantly improves the performance of BERT on out-of-distribution datasets while maintaining in-distribution accuracy.", + "author": "SongYang Gao; Shihan Dou; Qi Zhang; Xuanjing Huang", + "authorids": "/s/songyang-gao/; /s/shihan-dou/; /q/qi-zhang/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{gao-etal-2022-kernel,\n title = \"Kernel-Whitening: Overcome Dataset Bias with Isotropic Sentence Embedding\",\n author = \"Gao, SongYang and\n Dou, Shihan and\n Zhang, Qi and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.275/\",\n doi = \"10.18653/v1/2022.emnlp-main.275\",\n pages = \"4112--4122\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.275.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.275/", + "pdf_size": 6941903, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5337820161235522275&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information 
Processing, Shanghai, China", + "aff_domain": "m.fudan.edu.cn;m.fudan.edu.cn; ; ", + "email": "m.fudan.edu.cn;m.fudan.edu.cn; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;0+1;0+1", + "aff_unique_norm": "Fudan University;Shanghai Key Laboratory of Intelligent Information Processing", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "https://www.fudan.edu.cn;", + "aff_unique_abbr": "Fudan;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.427", + "title": "Keyphrase Generation Beyond the Boundaries of Title and Abstract", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Keyphrase generation aims at generating important phrases (keyphrases) that best describe a given document. In scholarly domains, current approaches have largely used only the title and abstract of the articles to generate keyphrases. In this paper, we comprehensively explore whether the integration of additional information from the full text of a given article or from semantically similar articles can be helpful for a neural keyphrase generation model or not. We discover that adding sentences from the full text, particularly in the form of the extractive summary of the article can significantly improve the generation of both types of keyphrases that are either present or absent from the text. Experimental results with three widely used models for keyphrase generation along with one of the latest transformer models suitable for longer documents, Longformer Encoder-Decoder (LED) validate the observation. We also present a new large-scale scholarly dataset FullTextKP for keyphrase generation. Unlike prior large-scale datasets, FullTextKP includes the full text of the articles along with the title and abstract. 
We release the source code at https://github.com/kgarg8/FullTextKP.", + "author": "Krishna Garg; Jishnu Ray Chowdhury; Cornelia Caragea", + "authorids": "/k/krishna-garg/; /j/jishnu-ray-chowdhury/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{garg-etal-2022-keyphrase,\n title = \"Keyphrase Generation Beyond the Boundaries of Title and Abstract\",\n author = \"Garg, Krishna and\n Ray Chowdhury, Jishnu and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.427/\",\n doi = \"10.18653/v1/2022.findings-emnlp.427\",\n pages = \"5809--5821\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.427.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.427/", + "pdf_size": 324801, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7555196154695783983&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Computer Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago", + "aff_domain": "uic.edu;uic.edu;uic.edu", + "email": "uic.edu;uic.edu;uic.edu", + "github": "https://github.com/kgarg8/FullTextKP", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.529", + "title": "Keyphrase Generation via Soft and Hard Semantic Corrections", + "track": 
"main", + "status": "Main", + "award": false, + "abstract": "Keyphrase generation aims to generate a set of condensed phrases given a source document. Although maximum likelihood estimation (MLE) based keyphrase generation methods have shown impressive performance, they suffer from the bias on the source-prediction sequence pair and the bias on the prediction-target pair. To tackle the above biases, we propose a novel correction model CorrKG on top of the MLE pipeline, where the biases are corrected via the optimal transport (OT) and a frequency-based filtering-and-sorting (FreqFS) strategy. Specifically, OT is introduced as soft correction to facilitate the alignment of salient information and rectify the semantic bias in the source document and predicted keyphrases pair. An adaptive semantic mass learning scheme is conducted on the vanilla OT to achieve a proper pair-wise optimal transport procedure, which promotes the OT learning brought by rectifying semantic masses dynamically. Besides, the FreqFS strategy is designed as hard correction to reduce the bias of predicted and ground truth keyphrases, and thus to generate accurate and sufficient keyphrases. 
Extensive experiments over multiple benchmark datasets show that our model achieves superior keyphrase generation as compared with the state-of-the-arts.", + "author": "Guangzhen Zhao; Guoshun Yin; Peng Yang; Yu Yao", + "authorids": "/g/guangzhen-zhao/; /g/guoshun-yin/; /p/peng-yang/; /y/yu-yao/", + "bibtex": "@inproceedings{zhao-etal-2022-keyphrase,\n title = \"Keyphrase Generation via Soft and Hard Semantic Corrections\",\n author = \"Zhao, Guangzhen and\n Yin, Guoshun and\n Yang, Peng and\n Yao, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.529/\",\n doi = \"10.18653/v1/2022.emnlp-main.529\",\n pages = \"7757--7768\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.529.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.529/", + "pdf_size": 1184410, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10227973243029151022&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China; School of Computer Science and Engineering, Key Laboratory of Computer Network and Information Integration, Ministry of Education, Southeast University, China", + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + 
"email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;seu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Southeast University", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.seu.edu.cn/", + "aff_unique_abbr": "SEU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.391", + "title": "Know Thy Strengths: Comprehensive Dialogue State Tracking Diagnostics", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent works that revealed the vulnerability of dialogue state tracking (DST) models to distributional shifts have made holistic comparisons on robustness and qualitative analyses increasingly important for understanding their relative performance. We present our findings from standardized and comprehensive DST diagnoses, which have previously been sparse and uncoordinated, using our toolkit, CheckDST, a collection of robustness tests and failure mode analytics. We discover that different classes of DST models have clear strengths and weaknesses, where generation models are more promising for handling language variety while span-based classification models are more robust to unseen entities. Prompted by this discovery, we also compare checkpoints from the same model and find that the standard practice of selecting checkpoints using validation loss/accuracy is prone to overfitting and each model class has distinct patterns of failure. 
Lastly, we demonstrate how our diagnoses motivate a pre-finetuning procedure with non-dialogue data that offers comprehensive improvements to generation models by alleviating the impact of distributional shifts through transfer learning.", + "author": "Hyundong Cho; Chinnadhurai Sankar; Christopher Lin; Kaushik Ram Sadagopan; Shahin Shayandeh; Asli Celikyilmaz; Jonathan May; Ahmad Beirami", + "authorids": "/h/hyundong-cho/; /c/chinnadhurai-sankar/; /c/christopher-lin/; /k/kaushik-ram-sadagopan/; /s/shahin-shayandeh/; /a/asli-celikyilmaz/; /j/jonathan-may/; /a/ahmad-beirami/", + "bibtex": "@inproceedings{cho-etal-2022-know,\n title = \"Know Thy Strengths: Comprehensive Dialogue State Tracking Diagnostics\",\n author = \"Cho, Hyundong and\n Sankar, Chinnadhurai and\n Lin, Christopher and\n Sadagopan, Kaushik Ram and\n Shayandeh, Shahin and\n Celikyilmaz, Asli and\n May, Jonathan and\n Beirami, Ahmad\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.391/\",\n doi = \"10.18653/v1/2022.findings-emnlp.391\",\n pages = \"5345--5359\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.391.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.391/", + "pdf_size": 901723, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6970157210327918019&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California Information Sciences Institute+Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; University of Southern California Information Sciences Institute; Google Research", + "aff_domain": "gmail.com; ; ; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ; ; ", + "github": "", + "project": "", 
+ "author_num": 8, + "aff_unique_index": "0+1;1;1;1;1;1;0;2", + "aff_unique_norm": "University of Southern California;Meta Platforms, Inc.;Google", + "aff_unique_dep": "Information Sciences Institute;Meta AI;Google Research", + "aff_unique_url": "https://isi.usc.edu;https://meta.com;https://research.google", + "aff_unique_abbr": "USC ISI;Meta;Google Research", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Mountain View", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.12", + "title": "Knowledge Distillation Transfer Sets and their Impact on Downstream NLU Tasks", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Teacher-student knowledge distillation is a popular technique for compressing today\u2019s prevailing large language models into manageable sizes that fit low-latency downstream applications. Both the teacher and the choice of transfer set used for distillation are crucial ingredients in creating a high quality student. Yet, the generic corpora used to pretrain the teacher and the corpora associated with the downstream target domain are often significantly different, which raises a natural question: should the student be distilled over the generic corpora, so as to learn from high-quality teacher predictions, or over the downstream task corpora to align with finetuning? Our study investigates this trade-off using Domain Classification (DC) and Intent Classification/Named Entity Recognition (ICNER) as downstream tasks. We distill several multilingual students from a larger multilingual LM with varying proportions of generic and task-specific datasets, and report their performance after finetuning on DC and ICNER. We observe significant improvements across tasks and test sets when only task-specific corpora is used. 
We also report on how the impact of adding task-specific data to the transfer set correlates with the similarity between generic and task-specific data. Our results clearly indicate that, while distillation from a generic LM benefits downstream tasks, students learn better using target domain data even if it comes at the price of noisier teacher predictions. In other words, target domain data still trumps teacher knowledge.", + "author": "Charith Peris; Lizhen Tan; Thomas Gueudre; Turan Gojayev; Pan Wei; Gokmen Oz", + "authorids": "/c/charith-peris/; /l/lizhen-tan/; /t/thomas-gueudre/; /t/turan-gojayev/; /p/pan-wei/; /g/gokmen-oz/", + "bibtex": "@inproceedings{peris-etal-2022-knowledge,\n title = \"Knowledge Distillation Transfer Sets and their Impact on Downstream {NLU} Tasks\",\n author = \"Peris, Charith and\n Tan, Lizhen and\n Gueudre, Thomas and\n Gojayev, Turan and\n Wei, Pan and\n Oz, Gokmen\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.12/\",\n doi = \"10.18653/v1/2022.emnlp-industry.12\",\n pages = \"128--137\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.12.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.12/", + "pdf_size": 511360, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16319174574272790623&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Amazon, Cambridge, USA; Amazon, Cambridge, USA; Amazon, Turin, Italy; Amazon, Berlin, Germany; Amazon, Cambridge, USA; Amazon, Cambridge, USA", + "aff_domain": "amazon.com;amazon.com;amazon.it;amazon.de;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.it;amazon.de;amazon.com;amazon.com", + "github": "", + 
"project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "0;0;1;2;0;0", + "aff_campus_unique": "Cambridge;Turin;Berlin", + "aff_country_unique_index": "0;0;1;2;0;0", + "aff_country_unique": "United States;Italy;Germany" + }, + { + "id": "2022.emnlp-industry.5", + "title": "Knowledge Distillation based Contextual Relevance Matching for E-commerce Product Search", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Online relevance matching is an essential task of e-commerce product search to boost the utility of search engines and ensure a smooth user experience. Previous work adopts either classical relevance matching models or Transformer-style models to address it. However, they ignore the inherent bipartite graph structures that are ubiquitous in e-commerce product search logs and are too inefficient to deploy online. In this paper, we design an efficient knowledge distillation framework for e-commerce relevance matching to integrate the respective advantages of Transformer-style models and classical relevance matching models. Especially for the core student model of the framework, we propose a novel method using k-order relevance modeling. The experimental results on large-scale real-world data (the size is 6 174 million) show that the proposed method significantly improves the prediction accuracy in terms of human relevance judgment. We deploy our method to JD.com online search platform. 
The A/B testing results show that our method significantly improves most business metrics under price sort mode and default sort mode.", + "author": "Ziyang Liu; Chaokun Wang; Hao Feng; Lingfei Wu; Liqun Yang", + "authorids": "/z/ziyang-liu/; /c/chaokun-wang/; /h/hao-feng/; /l/lingfei-wu/; /l/liqun-yang/", + "bibtex": "@inproceedings{liu-etal-2022-knowledge,\n title = \"Knowledge Distillation based Contextual Relevance Matching for {E}-commerce Product Search\",\n author = \"Liu, Ziyang and\n Wang, Chaokun and\n Feng, Hao and\n Wu, Lingfei and\n Yang, Liqun\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.5/\",\n doi = \"10.18653/v1/2022.emnlp-industry.5\",\n pages = \"63--76\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.5.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.5/", + "pdf_size": 4959926, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12795660161095420138&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Tsinghua University; Tsinghua University; Tsinghua University; JD.com; CNAEIT", + "aff_domain": "mails.thu.edu.cn;thu.edu.cn;mails.thu.edu.cn;email.wm.edu;cnaeit.com", + "email": "mails.thu.edu.cn;thu.edu.cn;mails.thu.edu.cn;email.wm.edu;cnaeit.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "Tsinghua University;JD.com;CNAEIT", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.jd.com;https://www.cnaeit.org", + "aff_unique_abbr": "THU;JD;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": 
"China" }, + { + "id": "2022.findings-emnlp.116", + "title": "Knowledge Graph Generation From Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this work we propose a novel end-to-end multi-stage Knowledge Graph (KG) generation system from textual inputs, separating the overall process into two stages. The graph nodes are generated first using pretrained language model, followed by a simple edge construction head, enabling efficient KG extraction from the text. For each stage we consider several architectural choices that can be used depending on the available training resources. We evaluated the model on a recent WebNLG 2020 Challenge dataset, matching the state-of-the-art performance on text-to-RDF generation task, as well as on New York Times (NYT) and a large-scale TekGen datasets, showing strong overall performance, outperforming the existing baselines. We believe that the proposed system can serve as a viable KG construction alternative to the existing linearization or sampling-based graph generation approaches.", + "author": "Igor Melnyk; Pierre Dognin; Payel Das", + "authorids": "/i/igor-melnyk/; /p/pierre-dognin/; /p/payel-das/", + "bibtex": "@inproceedings{melnyk-etal-2022-knowledge,\n title = \"Knowledge Graph Generation From Text\",\n author = \"Melnyk, Igor and\n Dognin, Pierre and\n Das, Payel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.116/\",\n doi = \"10.18653/v1/2022.findings-emnlp.116\",\n pages = \"1610--1622\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.116.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.116/", + "pdf_size": 513265, + "gs_citation": 42, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=16926547496584506144&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.127", + "title": "Knowledge Injected Prompt Based Fine-tuning for Multi-label Few-shot ICD Coding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic International Classification of Diseases (ICD) coding aims to assign multiple ICD codes to a medical note with average length of 3,000+ tokens. This task is challenging due to a high-dimensional space of multi-label assignment (tens of thousands of ICD codes) and the long-tail challenge: only a few codes (common diseases) are frequently assigned while most codes (rare diseases) are infrequently assigned. This study addresses the long-tail challenge by adapting a prompt-based fine-tuning technique with label semantics, which has been shown to be effective under few-shot setting. To further enhance the performance in medical domain, we propose a knowledge-enhanced longformer by injecting three domain-specific knowledge: hierarchy, synonym, and abbreviation with additional pretraining using contrastive learning. Experiments on MIMIC-III-full, a benchmark dataset of code assignment, show that our proposed method outperforms previous state-of-the-art method in 14.5% in marco F1 (from 10.3 to 11.8, P<0.001). 
To further test our model on few-shot setting, we created a new rare diseases coding dataset, MIMIC-III-rare50, on which our model improves marco F1 from 17.1 to 30.4 and micro F1 from 17.2 to 32.6 compared to previous method.", + "author": "Zhichao Yang; Shufan Wang; Bhanu Pratap Singh Rawat; Avijit Mitra; Hong Yu", + "authorids": "/z/zhichao-yang/; /s/shufan-wang/; /b/bhanu-pratap-singh-rawat/; /a/avijit-mitra/; /h/hong-yu/", + "bibtex": "@inproceedings{yang-etal-2022-knowledge-injected,\n title = \"Knowledge Injected Prompt Based Fine-tuning for Multi-label Few-shot {ICD} Coding\",\n author = \"Yang, Zhichao and\n Wang, Shufan and\n Rawat, Bhanu Pratap Singh and\n Mitra, Avijit and\n Yu, Hong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.127/\",\n doi = \"10.18653/v1/2022.findings-emnlp.127\",\n pages = \"1767--1781\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.127.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.127/", + "pdf_size": 2569024, + "gs_citation": 56, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5102172038735531976&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst; College of Information and Computer Sciences, University of Massachusetts Amherst + Department of Computer Science, University of Massachusetts Lowell", + "aff_domain": 
"umass.edu;umass.edu;umass.edu;umass.edu;uml.edu", + "email": "umass.edu;umass.edu;umass.edu;umass.edu;uml.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0+1", + "aff_unique_norm": "University of Massachusetts Amherst;University of Massachusetts Lowell", + "aff_unique_dep": "College of Information and Computer Sciences;Department of Computer Science", + "aff_unique_url": "https://www.umass.edu;https://www.uml.edu", + "aff_unique_abbr": "UMass Amherst;UMass Lowell", + "aff_campus_unique_index": "0;0;0;0;0+1", + "aff_campus_unique": "Amherst;Lowell", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.207", + "title": "Knowledge Prompting in Pre-trained Language Model for Natural Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge-enhanced Pre-trained Language Model (PLM) has recently received significant attention, which aims to incorporate factual knowledge into PLMs. However, most existing methods modify the internal structures of fixed types of PLMs by stacking complicated modules, and introduce redundant and irrelevant factual knowledge from knowledge bases (KBs). In this paper, to address these problems, we introduce a seminal knowledge prompting paradigm and further propose a knowledge-prompting-based PLM framework KP-PLM. This framework can be flexibly combined with existing mainstream PLMs. Specifically, we first construct a knowledge sub-graph from KBs for each context. Then we design multiple continuous prompts rules and transform the knowledge sub-graph into natural language prompts. To further leverage the factual knowledge from these prompts, we propose two novel knowledge-aware self-supervised tasks including prompt relevance inspection and masked prompt modeling. 
Extensive experiments on multiple natural language understanding (NLU) tasks show the superiority of KP-PLM over other state-of-the-art methods in both full-resource and low-resource settings. Our source codes will be released upon the acceptance of the paper.", + "author": "Jianing Wang; Wenkang Huang; Minghui Qiu; Qiuhui Shi; Hongbin Wang; Xiang Li; Ming Gao", + "authorids": "/j/jianing-wang/; /w/wenkang-huang/; /m/minghui-qiu/; /q/qiuhui-shi/; /h/hongbin-wang/; /x/xiang-li/; /m/ming-gao/", + "bibtex": "@inproceedings{wang-etal-2022-knowledge,\n title = \"Knowledge Prompting in Pre-trained Language Model for Natural Language Understanding\",\n author = \"Wang, Jianing and\n Huang, Wenkang and\n Qiu, Minghui and\n Shi, Qiuhui and\n Wang, Hongbin and\n Li, Xiang and\n Gao, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.207/\",\n doi = \"10.18653/v1/2022.emnlp-main.207\",\n pages = \"3164--3177\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.207.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.207/", + "pdf_size": 1005808, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8031426726955409437&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 3, + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Ant Group, Hangzhou, China; Alibaba Group, Hangzhou, China; School of Data Science and Engineering, East China Normal University, Shanghai, China + KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China; School of Data Science and Engineering, East China Normal 
University, Shanghai, China + KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China", + "aff_domain": "gmail.com;alibaba-inc.com;antgroup.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "email": "gmail.com;alibaba-inc.com;antgroup.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn;dase.ecnu.edu.cn", + "github": "https://github.com/wjn1996/KP-PLM", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;2;0+0;0+0", + "aff_unique_norm": "East China Normal University;Ant Group;Alibaba Group", + "aff_unique_dep": "School of Data Science and Engineering;;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.antgroup.com;https://www.alibaba.com", + "aff_unique_abbr": "ECNU;Ant Group;Alibaba", + "aff_campus_unique_index": "0;1;1;1;1;0+0;0+0", + "aff_campus_unique": "Shanghai;Hangzhou", + "aff_country_unique_index": "0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.83", + "title": "Knowledge Stimulated Contrastive Prompting for Low-Resource Stance Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Stance Detection Task (SDT) aims at identifying the stance of the sentence towards a specific target and is usually modeled as a classification problem. Backgound knowledge is often necessary for stance detection with respect to a specific target, especially when there is no target explicitly mentioned in text. This paper focuses on the knowledge stimulation for low-resource stance detection tasks. We firstly explore to formalize stance detection as a prompt based contrastive learning task. At the same time, to make prompt learning suit to stance detection, we design a template mechanism to incorporate corresponding target into instance representation. Furthermore, we propose a masked language prompt joint contrastive learning approach to stimulate the knowledge inherit from the pre-trained model. 
The experimental results on three benchmarks show that knowledge stimulation is effective in stance detection accompanied with our proposed mechanism.", + "author": "Kai Zheng; Qingfeng Sun; Yaming Yang; Fei Xu", + "authorids": "/k/kai-zheng/; /q/qingfeng-sun/; /y/yaming-yang/; /f/fei-xu/", + "bibtex": "@inproceedings{zheng-etal-2022-knowledge,\n title = \"Knowledge Stimulated Contrastive Prompting for Low-Resource Stance Detection\",\n author = \"Zheng, Kai and\n Sun, Qingfeng and\n Yang, Yaming and\n Xu, Fei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.83/\",\n doi = \"10.18653/v1/2022.findings-emnlp.83\",\n pages = \"1168--1178\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.83.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.83/", + "pdf_size": 521046, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6385941524696094188&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Microsoft, Beijing, China; Microsoft, Beijing, China; Microsoft, Beijing, China; Microsoft, Beijing, China", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Microsoft", + "aff_unique_dep": "", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "MSFT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.645", + "title": "Knowledge Transfer from Answer Ranking 
to Answer Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent studies show that Question Answering (QA) based on Answer Sentence Selection (AS2) can be improved by generating an improved answer from the top-k ranked answer sentences (termed GenQA). This allows for synthesizing the information from multiple candidates into a concise, natural-sounding answer. However, creating large-scale supervised training data for GenQA models is very challenging. In this paper, we propose to train a GenQA model by transferring knowledge from a trained AS2 model, to overcome the aforementioned issue. First, we use an AS2 model to produce a ranking over answer candidates for a set of questions. Then, we use the top ranked candidate as the generation target, and the next k top ranked candidates as context for training a GenQA model. We also propose to use the AS2 model prediction scores for loss weighting and score-conditioned input/output shaping, to aid the knowledge transfer. 
Our evaluation on three public and one large industrial datasets demonstrates the superiority of our approach over the AS2 baseline, and GenQA trained using supervised data.", + "author": "Matteo Gabburo; Rik Koncel-Kedziorski; Siddhant Garg; Luca Soldaini; Alessandro Moschitti", + "authorids": "/m/matteo-gabburo/; /r/rik-koncel-kedziorski/; /s/siddhant-garg/; /l/luca-soldaini/; /a/alessandro-moschitti/", + "bibtex": "@inproceedings{gabburo-etal-2022-knowledge,\n title = \"Knowledge Transfer from Answer Ranking to Answer Generation\",\n author = \"Gabburo, Matteo and\n Koncel-Kedziorski, Rik and\n Garg, Siddhant and\n Soldaini, Luca and\n Moschitti, Alessandro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.645/\",\n doi = \"10.18653/v1/2022.emnlp-main.645\",\n pages = \"9481--9495\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.645.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.645/", + "pdf_size": 385179, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11977862169757909156&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of Trento; Amazon Alexa AI; Amazon Alexa AI; Allen Institute for AI; Amazon Alexa AI", + "aff_domain": "unitn.it;amazon.com;amazon.com;allenai.org;amazon.com", + "email": "unitn.it;amazon.com;amazon.com;allenai.org;amazon.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;1", + "aff_unique_norm": "University of Trento;Amazon;Allen Institute for AI", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.unitn.it;https://www.amazon.com;https://allenai.org", + "aff_unique_abbr": 
"UniTN;Amazon;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "Italy;United States" + }, + { + "id": "2022.findings-emnlp.467", + "title": "Knowledge-Enhanced Self-Supervised Prototypical Network for Few-Shot Event Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prototypical network based joint methods have attracted much attention in few-shot event detection, which carry out event detection in a unified sequence tagging framework. However, these methods suffer from the inaccurate prototype representation problem, due to two main reasons: the number of instances for calculating prototypes is limited; And, they do not well capture the relationships among event prototypes. To deal with this problem, we propose a Knowledge-Enhanced self-supervised Prototypical Network, called KE-PN, for few-shot event detection. KE-PN adopts hybrid rules, which can automatically align event types to an external knowledge base, i.e., FrameNet, to obtain more instances. It proposes a self-supervised learning method to filter out noisy data from enhanced instances. KE-PN is further equipped with an auxiliary event type relationship classification module, which injects the relationship information into representations of event prototypes. 
Extensive experiments on three benchmark datasets, i.e., FewEvent, MAVEN, and ACE2005 demonstrate the state-of-the-art performance of KE-PN.", + "author": "Kailin Zhao; Xiaolong Jin; Long Bai; Jiafeng Guo; Xueqi Cheng", + "authorids": "/k/kailin-zhao/; /x/xiaolong-jin/; /l/long-bai/; /j/jiafeng-guo/; /x/xueqi-cheng/", + "bibtex": "@inproceedings{zhao-etal-2022-knowledge,\n title = \"Knowledge-Enhanced Self-Supervised Prototypical Network for Few-Shot Event Detection\",\n author = \"Zhao, Kailin and\n Jin, Xiaolong and\n Bai, Long and\n Guo, Jiafeng and\n Cheng, Xueqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.467/\",\n doi = \"10.18653/v1/2022.findings-emnlp.467\",\n pages = \"6266--6275\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.467.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.467/", + "pdf_size": 2847724, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10482762797872322960&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.61", + "title": "Knowledge-Rich Self-Supervision for Biomedical Entity Linking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Entity linking faces significant challenges such as prolific variations and prevalent ambiguities, especially in high-value domains with myriad entities. Standard classification approaches suffer from the annotation bottleneck and cannot effectively handle unseen entities. 
Zero-shot entity linking has emerged as a promising direction for generalizing to new entities, but it still requires example gold entity mentions during training and canonical descriptions for all entities, both of which are rarely available outside of Wikipedia. In this paper, we explore Knowledge-RIch Self-Supervision (KRISS) for biomedical entity linking, by leveraging readily available domain knowledge. In training, it generates self-supervised mention examples on unlabeled text using a domain ontology and trains a contextual encoder using contrastive learning. For inference, it samples self-supervised mentions as prototypes for each entity and conducts linking by mapping the test mention to the most similar prototype. Our approach can easily incorporate entity descriptions and gold mention labels if available. We conducted extensive experiments on seven standard datasets spanning biomedical literature and clinical notes. Without using any labeled information, our method produces KRISSBERT, a universal entity linker for four million UMLS entities that attains new state of the art, outperforming prior self-supervised methods by as much as 20 absolute points in accuracy. 
We released KRISSBERT at https://aka.ms/krissbert.", + "author": "Sheng Zhang; Hao Cheng; Shikhar Vashishth; Cliff Wong; Jinfeng Xiao; Xiaodong Liu; Tristan Naumann; Jianfeng Gao; Hoifung Poon", + "authorids": "/s/sheng-zhang/; /h/hao-cheng/; /s/shikhar-vashishth/; /c/cliff-wong/; /j/jinfeng-xiao/; /x/xiaodong-liu/; /t/tristan-naumann/; /j/jianfeng-gao/; /h/hoifung-poon/", + "bibtex": "@inproceedings{zhang-etal-2022-knowledge,\n title = \"Knowledge-Rich Self-Supervision for Biomedical Entity Linking\",\n author = \"Zhang, Sheng and\n Cheng, Hao and\n Vashishth, Shikhar and\n Wong, Cliff and\n Xiao, Jinfeng and\n Liu, Xiaodong and\n Naumann, Tristan and\n Gao, Jianfeng and\n Poon, Hoifung\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.61/\",\n doi = \"10.18653/v1/2022.findings-emnlp.61\",\n pages = \"868--880\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.61.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.61/", + "pdf_size": 2141549, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8948126701371774620&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "", + "project": "https://aka.ms/krissbert", + "author_num": 9 + }, + { + "id": "2022.findings-emnlp.133", + "title": "Knowledge-augmented Self-training of A Question Rewriter for Conversational Knowledge Base Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The recent rise of conversational applications such as online customer service systems and intelligent personal assistants has promoted the development of 
conversational knowledge base question answering (ConvKBQA). Different from the traditional single-turn KBQA, ConvKBQA usually explores multi-turn questions around a topic, where ellipsis and coreference pose great challenges to the single-turn KBQA systems which require self-contained questions. In this paper, we propose a rewrite-and-reason framework to first produce a full-fledged rewritten question based on the conversation history and then reason the answer by existing single-turn KBQA models. To overcome the absence of the rewritten supervision signals, we introduce a knowledge-augmented self-training mechanism to transfer the question rewriter from another dataset to adapt to the current knowledge base. Our question rewriter is decoupled from the subsequent QA process, which makes it easy to be united with either retrieval-based or semantic parsing-based KBQA models. Experiment results demonstrate the effectiveness of our method and a new state-of-the-art result is achieved. The code and dataset are available online now.", + "author": "Xirui Ke; Jing Zhang; Xin Lv; Yiqi Xu; Shulin Cao; Cuiping Li; Hong Chen; Juanzi Li", + "authorids": "/x/xirui-ke/; /j/jing-zhang/; /x/xin-lv/; /y/yiqi-xu/; /s/shulin-cao/; /c/cuiping-li/; /h/hong-chen/; /j/juanzi-li/", + "bibtex": "@inproceedings{ke-etal-2022-knowledge,\n title = \"Knowledge-augmented Self-training of A Question Rewriter for Conversational Knowledge Base Question Answering\",\n author = \"Ke, Xirui and\n Zhang, Jing and\n Lv, Xin and\n Xu, Yiqi and\n Cao, Shulin and\n Li, Cuiping and\n Chen, Hong and\n Li, Juanzi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.133/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.133\",\n pages = \"1844--1856\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.133.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.133/", + "pdf_size": 484983, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10708902193700148421&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; Department of Computer Science and Technology, Tsinghua University, Beijing, China; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; Department of Computer Science and Technology, Tsinghua University, Beijing, China; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; School of Information, Renmin University of China, Beijing, China+Key Laboratory of Data Engineering and Knowledge Engineering of Ministry of Education; Department of Computer Science and Technology, Tsinghua University, Beijing, China", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;mails.tsinghua.edu.cn;ruc.edu.cn;mails.tsinghua.edu.cn;ruc.edu.cn;ruc.edu.cn;tsinghua.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;mails.tsinghua.edu.cn;ruc.edu.cn;mails.tsinghua.edu.cn;ruc.edu.cn;ruc.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/RUCKBReasoning/QuestionRewriter", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;0+1;2;0+1;2;0+1;0+1;2", + "aff_unique_norm": "Renmin University of China;Ministry of Education;Tsinghua University", + "aff_unique_dep": "School of Information;Key Laboratory of Data Engineering 
and Knowledge Engineering;Department of Computer Science and Technology", + "aff_unique_url": "http://www.ruc.edu.cn;;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "RUC;;THU", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0;0+0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.250", + "title": "Knowledge-grounded Dialog State Tracking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge (including structured knowledge such as schema and ontology and unstructured knowledge such as web corpus) is a critical part of dialog understanding, especially for unseen tasks and domains. Traditionally, such domain-specific knowledge is encoded implicitly into model parameters for the execution of downstream tasks, which makes training inefficient. In addition , such models are not easily transferable to new tasks with different schemas. In this work, we propose to perform dialog state tracking grounded on knowledge encoded externally. We query relevant knowledge of various forms based on the dialog context where such information can grounds the prediction of dialog states. 
We demonstrate superior performance of our proposed method over strong baselines, especially in the few-shot learning setting.", + "author": "Dian Yu; Mingqiu Wang; Yuan Cao; Laurent El Shafey; Izhak Shafran; Hagen Soltau", + "authorids": "/d/dian-yu/; /m/mingqiu-wang/; /y/yuan-cao/; /l/laurent-el-shafey/; /i/izhak-shafran/; /h/hagen-soltau/", + "bibtex": "@inproceedings{yu-etal-2022-knowledge,\n title = \"Knowledge-grounded Dialog State Tracking\",\n author = \"Yu, Dian and\n Wang, Mingqiu and\n Cao, Yuan and\n El Shafey, Laurent and\n Shafran, Izhak and\n Soltau, Hagen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.250/\",\n doi = \"10.18653/v1/2022.findings-emnlp.250\",\n pages = \"3428--3435\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.250.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.250/", + "pdf_size": 299885, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14977912990093681339&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.404", + "title": "LADIS: Language Disentanglement for 3D Shape Editing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural language interaction is a promising direction for democratizing 3D shape design. However, existing methods for text-driven 3D shape editing face challenges in producing decoupled, local edits to 3D shapes. We address this problem by learning disentangled latent representations that ground language in 3D geometry. To this end, we propose a complementary tool set including a novel network architecture, a disentanglement loss, and a new editing procedure. Additionally, to measure edit locality, we define a new metric that we call part-wise edit precision. We show that our method outperforms existing SOTA methods by 20% in terms of edit locality, and up to 6.6% in terms of language reference resolution accuracy. Human evaluations additionally show that compared to the existing SOTA, our method produces shape edits that are more local, more semantically accurate, and more visually obvious. 
Our work suggests that by solely disentangling language representations, downstream 3D shape editing can become more local to relevant parts, even if the model was never given explicit part-based supervision.", + "author": "Ian Huang; Panos Achlioptas; Tianyi Zhang; Sergei Tulyakov; Minhyuk Sung; Leonidas Guibas", + "authorids": "/i/ian-huang/; /p/panos-achlioptas/; /t/tianyi-zhang/; /s/sergei-tulyakov/; /m/minhyuk-sung/; /l/leonidas-guibas/", + "bibtex": "@inproceedings{huang-etal-2022-ladis,\n title = \"{LADIS}: Language Disentanglement for 3{D} Shape Editing\",\n author = \"Huang, Ian and\n Achlioptas, Panos and\n Zhang, Tianyi and\n Tulyakov, Sergei and\n Sung, Minhyuk and\n Guibas, Leonidas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.404/\",\n doi = \"10.18653/v1/2022.findings-emnlp.404\",\n pages = \"5519--5532\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.404.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.404/", + "pdf_size": 6553795, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15231526118899589334&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "Department of Computer Science, Stanford University\u2020; Snap Research\u00a7; Department of Computer Science, Stanford University\u2020; Snap Research\u00a7; School of Computing, KAIST\u2021; Department of Computer Science, Stanford University\u2020", + "aff_domain": "cs.stanford.edu;snap.com;cs.stanford.edu;snap.com;kaist.ac.kr;cs.stanford.edu", + "email": "cs.stanford.edu;snap.com;cs.stanford.edu;snap.com;kaist.ac.kr;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": 
"0;1;0;1;2;0", + "aff_unique_norm": "Stanford University;Snap Inc.;KAIST", + "aff_unique_dep": "Department of Computer Science;Snap Research;School of Computing", + "aff_unique_url": "https://www.stanford.edu;https://research.snap.com;https://www.kaist.ac.kr", + "aff_unique_abbr": "Stanford;Snap;KAIST", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "2022.findings-emnlp.33", + "title": "LEMON: Language-Based Environment Manipulation via Execution-Guided Pre-training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language-based environment manipulation requires agents to manipulate the environment following natural language instructions, which is challenging due to the huge space of the environments.To address this challenge, various approaches have been proposed in recent work. Although these approaches work well for their intended environments, they are difficult to generalize across environments. In this work, we propose LEMON, a general framework for language-based environment manipulation tasks. Specifically, we first specify a general approach for language-based environment manipulation tasks, which can deal with various environments using the same generative language model. Then we propose an execution-guided pre-training strategy to inject prior knowledge of environments to the language model with a pure synthetic pre-training corpus. 
Experimental results on tasks including Alchemy, Scene, Tangrams, ProPara and Recipes demonstrate the effectiveness of LEMON: it achieves new state-of-the-art results on four of the tasks, and the execution-guided pre-training strategy brings remarkable improvements on all experimental tasks.", + "author": "Qi Shi; Qian Liu; Bei Chen; Yu Zhang; Ting Liu; Jian-Guang Lou", + "authorids": "/q/qi-shi/; /q/qian-liu/; /b/bei-chen/; /y/yu-zhang/; /t/ting-liu/; /j/jian-guang-lou/", + "bibtex": "@inproceedings{shi-etal-2022-lemon,\n title = \"{LEMON}: Language-Based Environment Manipulation via Execution-Guided Pre-training\",\n author = \"Shi, Qi and\n Liu, Qian and\n Chen, Bei and\n Zhang, Yu and\n Liu, Ting and\n Lou, Jian-Guang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.33/\",\n doi = \"10.18653/v1/2022.findings-emnlp.33\",\n pages = \"471--485\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.33.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.33/", + "pdf_size": 731011, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14784483391391545721&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Beihang University, Beijing, China + Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Microsoft Research Asia, Beijing, China", + 
"aff_domain": "ir.hit.edu.cn;buaa.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;microsoft.com", + "email": "ir.hit.edu.cn;buaa.edu.cn;microsoft.com;ir.hit.edu.cn;ir.hit.edu.cn;microsoft.com", + "github": "https://github.com/microsoft/ContextualSP", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+2;2;0;0;2", + "aff_unique_norm": "Harbin Institute of Technology;Beihang University;Microsoft Research Asia", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval;;Research", + "aff_unique_url": "http://www.hit.edu.cn/;http://www.buaa.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "HIT;BUAA;MSRA", + "aff_campus_unique_index": "0;1+1;1;0;0;1", + "aff_campus_unique": "Harbin;Beijing", + "aff_country_unique_index": "0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.392", + "title": "LILA: A Unified Benchmark for Mathematical Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Mathematical reasoning skills are essential for general-purpose intelligentsystems to perform tasks from grocery shopping to climate modeling.Towards evaluating and improving AI systems in this domain, we proposeLILA, a unified mathematical reasoning benchmark consisting of 23 diversetasks along four dimensions:(i) mathematical abilities e.g., arithmetic, calculus (ii) language format e.g., question-answering, fill-in-the-blanks (iii) language diversity e.g., no language, simple language (iv) external knowledge e.g., commonsense, physics. 
We construct our benchmark by extending 20 datasets benchmark by collecting task instructions and solutions in the form of Python programs,thereby obtaining explainable solutions in addition to the correct answer.We additionally introduce two evaluation datasets to measure out-of-distribution performance and robustness to language perturbation.Finally, we introduce BHASKARA,a general-purpose mathematical reasoning model trained on LILA. Importantly, we find that multi-tasking leads to significant improvements (average relative improvement of 21.83% F1 score vs. single-task models),while the best performing model only obtains 60.40%,indicating the room for improvement in general mathematical reasoning and understanding.", + "author": "Swaroop Mishra; Matthew Finlayson; Pan Lu; Leonard Tang; Sean Welleck; Chitta Baral; Tanmay Rajpurohit; Oyvind Tafjord; Ashish Sabharwal; Peter Clark; Ashwin Kalyan", + "authorids": "/s/swaroop-mishra/; /m/matthew-finlayson/; /p/pan-lu/; /l/leonard-tang/; /s/sean-welleck/; /c/chitta-baral/; /t/tanmay-rajpurohit/; /o/oyvind-tafjord/; /a/ashish-sabharwal/; /p/peter-clark/; /a/ashwin-kalyan/", + "bibtex": "@inproceedings{mishra-etal-2022-lila,\n title = \"{LILA}: A Unified Benchmark for Mathematical Reasoning\",\n author = \"Mishra, Swaroop and\n Finlayson, Matthew and\n Lu, Pan and\n Tang, Leonard and\n Welleck, Sean and\n Baral, Chitta and\n Rajpurohit, Tanmay and\n Tafjord, Oyvind and\n Sabharwal, Ashish and\n Clark, Peter and\n Kalyan, Ashwin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.392/\",\n doi = \"10.18653/v1/2022.emnlp-main.392\",\n pages = \"5807--5832\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.392.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.392/", + "pdf_size": 1968037, + "gs_citation": 129, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9454384250676590048&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Arizona State Univeristy\u2020; The Allen Institute for AI\u2021; UCLA\u2020; Harvard University; The Allen Institute for AI; Arizona State Univeristy; Georgia Institute of Technology; The Allen Institute for AI; The Allen Institute for AI; The Allen Institute for AI; The Allen Institute for AI\u2021", + "aff_domain": "asu.edu;allenai.org;ucla.edu;harvard.edu;allenai.org;asu.edu; gatech.edu;allenai.org;allenai.org;allenai.org;allenai.org", + "email": "asu.edu;allenai.org;ucla.edu;harvard.edu;allenai.org;asu.edu; gatech.edu;allenai.org;allenai.org;allenai.org;allenai.org", + "github": "https://github.com/allenai/Lila", + "project": "https://huggingface.co/allenai/bhaskara", + "author_num": 11, + "aff_unique_index": "0;1;2;3;1;0;4;1;1;1;1", + "aff_unique_norm": "Arizona State University;Allen Institute for AI;University of California, Los Angeles;Harvard University;Georgia Institute of Technology", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.asu.edu;https://allenai.org;https://www.ucla.edu;https://www.harvard.edu;https://www.gatech.edu", + "aff_unique_abbr": "ASU;AI2;UCLA;Harvard;Georgia Tech", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.360", + "title": "LOPS: Learning Order Inspired Pseudo-Label Selection for Weakly Supervised Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Weakly supervised text classification methods typically train a deep neural classifier based on pseudo-labels. 
The quality of pseudo-labels is crucial to final performance but they are inevitably noisy due to their heuristic nature, so selecting the correct ones has a huge potential for performance boost. One straightforward solution is to select samples based on the softmax probability scores in the neural classifier corresponding to their pseudo-labels. However, we show through our experiments that such solutions are ineffective and unstable due to the erroneously high-confidence predictions from poorly calibrated models. Recent studies on the memorization effects of deep neural models suggest that these models first memorize training samples with clean labels and then those with noisy labels. Inspired by this observation, we propose a novel pseudo-label selection method LOPS that takes learning order of samples into consideration. We hypothesize that the learning order reflects the probability of wrong annotation in terms of ranking, and therefore, propose to select the samples that are learnt earlier. 
LOPS can be viewed as a strong performance-boost plug-in to most existing weakly-supervised text classification methods, as confirmed in extensive experiments on four real-world datasets.", + "author": "Dheeraj Mekala; Chengyu Dong; Jingbo Shang", + "authorids": "/d/dheeraj-mekala/; /c/chengyu-dong/; /j/jingbo-shang/", + "bibtex": "@inproceedings{mekala-etal-2022-lops,\n title = \"{LOPS}: Learning Order Inspired Pseudo-Label Selection for Weakly Supervised Text Classification\",\n author = \"Mekala, Dheeraj and\n Dong, Chengyu and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.360/\",\n doi = \"10.18653/v1/2022.findings-emnlp.360\",\n pages = \"4894--4908\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.360.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.360/", + "pdf_size": 665284, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2883977639142159428&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of California San Diego; University of California San Diego; Hal\u0131c\u0131o \u02d8glu Data Science Institute, University of California San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of California, San Diego;University of California San Diego", + "aff_unique_dep": ";Hal\u0131c\u0131o\u011flu Data Science Institute", + "aff_unique_url": "https://ucsd.edu;https://ucsd.edu", + "aff_unique_abbr": "UCSD;UCSD", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "San Diego", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.529", + "title": "LPC: A Logits and Parameter Calibration Framework for Continual Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "When we execute the typical fine-tuning paradigm on continuously sequential tasks, the model will suffer from the catastrophic forgetting problem (i.e., the model tends to adjust old parameters according to the new knowledge, which leads to the loss of previously acquired concepts). People proposed replay-based methods by accessing old data from extra storage and maintaining the parameters of old concepts, which actually raise the privacy issue and larger memory requirements. In this work, we aim to achieve the sequential/continual learning of knowledge without accessing the old data. The core idea is to calibrate the parameters and logits (output) so that preserving old parameters and generalized learning on new concepts can be solved simultaneously. Our proposed framework includes two major components, Logits Calibration (LC) and Parameter Calibration (PC). The LC focuses on calibrating the learning of novel models with old models, and PC aims to preserve the parameters of old models. These two operations can maintain the old knowledge while learning new tasks without storing previous data. We conduct experiments on various scenarios of the GLUE (the General Language Understanding Evaluation) benchmark. 
The experimental results show that our model achieves state-of-the-art performance in all scenarios.", + "author": "Xiaodi Li; Zhuoyi Wang; Dingcheng Li; Latifur Khan; Bhavani Thuraisingham", + "authorids": "/x/xiaodi-li/; /z/zhuoyi-wang/; /d/dingcheng-li/; /l/latifur-khan/; /b/bhavani-thuraisingham/", + "bibtex": "@inproceedings{li-etal-2022-lpc,\n title = \"{LPC}: A Logits and Parameter Calibration Framework for Continual Learning\",\n author = \"Li, Xiaodi and\n Wang, Zhuoyi and\n Li, Dingcheng and\n Khan, Latifur and\n Thuraisingham, Bhavani\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.529/\",\n doi = \"10.18653/v1/2022.findings-emnlp.529\",\n pages = \"7142--7155\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.529.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.529/", + "pdf_size": 423629, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7237888212142994630&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "The University of Texas at Dallas; The University of Texas at Dallas; Baidu Research; The University of Texas at Dallas; The University of Texas at Dallas", + "aff_domain": "utdallas.edu;utdallas.edu;gmail.com;utdallas.edu;utdallas.edu", + "email": "utdallas.edu;utdallas.edu;gmail.com;utdallas.edu;utdallas.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "University of Texas at Dallas;Baidu", + "aff_unique_dep": ";Baidu Research", + "aff_unique_url": "https://www.utdallas.edu;https://research.baidu.com", + "aff_unique_abbr": "UT Dallas;Baidu", + "aff_campus_unique_index": "0;0;0;0", + 
"aff_campus_unique": "Dallas;", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.184", + "title": "LVP-M3: Language-aware Visual Prompt for Multilingual Multimodal Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal Machine Translation (MMT) focuses on enhancing text-only translation with visual features, which has attracted considerable attention from both natural language processing and computer vision communities. Recent advances still struggle to train a separate model for each language pair, which is costly and unaffordable when the number of languages increases in the real world. In other words, the multilingual multimodal machine translation (Multilingual MMT) task has not been investigated, which aims to handle the aforementioned issues by providing a shared semantic space for multiple languages. Besides, the image modality has no language boundaries, which is superior to bridging the semantic gap between languages. To this end,we first propose the Multilingual MMT task by establishing two new Multilingual MMT benchmark datasets covering seven languages.Then, an effective baseline LVP-M3 using visual prompts is proposed to support translations between different languages,which includes three stages (token encoding, language-aware visual prompt generation, and language translation). 
Extensive experimental results on our constructed benchmark datasets demonstrate the effectiveness of LVP-M3 method for Multilingual MMT.", + "author": "Hongcheng Guo; Jiaheng Liu; Haoyang Huang; Jian Yang; Zhoujun Li; Dongdong Zhang; Zheng Cui", + "authorids": "/h/hongcheng-guo/; /j/jiaheng-liu/; /h/haoyang-huang/; /j/jian-yang/; /z/zhoujun-li/; /d/dongdong-zhang/; /z/zheng-cui/", + "bibtex": "@inproceedings{guo-etal-2022-lvp,\n title = \"{LVP}-{M}3: Language-aware Visual Prompt for Multilingual Multimodal Machine Translation\",\n author = \"Guo, Hongcheng and\n Liu, Jiaheng and\n Huang, Haoyang and\n Yang, Jian and\n Li, Zhoujun and\n Zhang, Dongdong and\n Cui, Zheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.184/\",\n doi = \"10.18653/v1/2022.emnlp-main.184\",\n pages = \"2862--2872\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.184.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.184/", + "pdf_size": 1785846, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9899101219101117621&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Beihang University; Beihang University; Microsoft Research Asia; Beihang University; Beihang University; Microsoft Research Asia; Microsoft Research Asia", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;microsoft.com;buaa.edu.cn;buaa.edu.cn;microsoft.com;microsoft.com", + "email": "buaa.edu.cn;buaa.edu.cn;microsoft.com;buaa.edu.cn;buaa.edu.cn;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;1;1", + "aff_unique_norm": "Beihang University;Microsoft Research", + "aff_unique_dep": 
";Research", + "aff_unique_url": "http://www.buaa.edu.cn/;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "BUAA;MSR Asia", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.177", + "title": "Label-Driven Denoising Framework for Multi-Label Few-Shot Aspect Category Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multi-Label Few-Shot Aspect Category Detection (FS-ACD) is a new sub-task of aspect-based sentiment analysis, which aims to detect aspect categories accurately with limited training instances. Recently, dominant works use the prototypical network to accomplish this task, and employ the attention mechanism to extract keywords of aspect category from the sentences to produce the prototype for each aspect. However, they still suffer from serious noise problems: (1) due to lack of sufficient supervised data, the previous methods easily catch noisy words irrelevant to the current aspect category, which largely affects the quality of the generated prototype; (2) the semantically-close aspect categories usually generate similar prototypes, which are mutually noisy and confuse the classifier seriously. In this paper, we resort to the label information of each aspect to tackle the above problems, along with proposing a novel Label-Driven Denoising Framework (LDF). 
Extensive experimental results show that our framework achieves better performance than other state-of-the-art methods.", + "author": "Fei Zhao; Yuchen Shen; Zhen Wu; Xinyu Dai", + "authorids": "/f/fei-zhao/; /y/yuchen-shen/; /z/zhen-wu/; /x/xinyu-dai/", + "bibtex": "@inproceedings{zhao-etal-2022-label,\n title = \"Label-Driven Denoising Framework for Multi-Label Few-Shot Aspect Category Detection\",\n author = \"Zhao, Fei and\n Shen, Yuchen and\n Wu, Zhen and\n Dai, Xinyu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.177/\",\n doi = \"10.18653/v1/2022.findings-emnlp.177\",\n pages = \"2390--2402\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.177.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.177/", + "pdf_size": 994601, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13769549988247628030&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University; School of Information and Software Engineering, University of Electronic Science and Technology of China; National Key Laboratory for Novel Software Technology, Nanjing University; National Key Laboratory for Novel Software Technology, Nanjing University", + "aff_domain": "smail.nju.edu.cn;gmail.com;nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;gmail.com;nju.edu.cn;nju.edu.cn", + "github": "https://github.com/1429904852/LDF", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Nanjing University;University of Electronic Science and Technology of China", + "aff_unique_dep": "National Key Laboratory for Novel Software 
Technology;School of Information and Software Engineering", + "aff_unique_url": "http://www.nju.edu.cn;https://www.uestc.edu.cn", + "aff_unique_abbr": "Nanjing University;UESTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.673", + "title": "Label-aware Multi-level Contrastive Learning for Cross-lingual Spoken Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite the great success of spoken language understanding (SLU) in high-resource languages, it remains challenging in low-resource languages mainly due to the lack of labeled training data. The recent multilingual code-switching approach achieves better alignments of model representations across languages by constructing a mixed-language context in zero-shot cross-lingual SLU. However, current code-switching methods are limited to implicit alignment and disregard the inherent semantic structure in SLU, i.e., the hierarchical inclusion of utterances, slots and words. In this paper, we propose to model the utterance-slot-word structure by a multi-level contrastive learning framework at the utterance, slot and word levels to facilitate explicit alignment. Novel code-switching schemes are introduced to generate hard negative examples for our contrastive learning framework. Furthermore, we develop a label-aware joint model leveraging label semantics to enhance the implicit alignment and feed to contrastive learning. 
Our experimental results show that our proposed methods significantly improve the performance compared with the strong baselines on two zero-shot cross-lingual SLU benchmark datasets.", + "author": "Shining Liang; Linjun Shou; Jian Pei; Ming Gong; Wanli Zuo; Xianglin Zuo; Daxin Jiang", + "authorids": "/s/shining-liang/; /l/linjun-shou/; /j/jian-pei/; /m/ming-gong/; /w/wanli-zuo/; /x/xianglin-zuo/; /d/daxin-jiang/", + "bibtex": "@inproceedings{liang-etal-2022-label,\n title = \"Label-aware Multi-level Contrastive Learning for Cross-lingual Spoken Language Understanding\",\n author = \"Liang, Shining and\n Shou, Linjun and\n Pei, Jian and\n Gong, Ming and\n Zuo, Wanli and\n Zuo, Xianglin and\n Jiang, Daxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.673/\",\n doi = \"10.18653/v1/2022.emnlp-main.673\",\n pages = \"9903--9918\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.673.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.673/", + "pdf_size": 5496707, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2604581954334240302&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.233", + "title": "Language Contamination Helps Explains the Cross-lingual Capabilities of English Pretrained Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "English pretrained language models, which make up the backbone of many modern NLP systems, require huge amounts of unlabeled training data. 
These models are generally presented as being trained only on English text but have been found to transfer surprisingly well to other languages. We investigate this phenomenon and find that common English pretraining corpora actually contain significant amounts of non-English text: even when less than 1% of data is not English (well within the error rate of strong language classifiers), this leads to hundreds of millions of foreign language tokens in large-scale datasets. We then demonstrate that even these small percentages of non-English data facilitate cross-lingual transfer for models trained on them, with target language performance strongly correlated to the amount of in-language data seen during pretraining. In light of these findings, we argue that no model is truly monolingual when pretrained at scale, which should be considered when evaluating cross-lingual transfer.", + "author": "Terra Blevins; Luke Zettlemoyer", + "authorids": "/t/terra-blevins/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{blevins-zettlemoyer-2022-language,\n title = \"Language Contamination Helps Explains the Cross-lingual Capabilities of {E}nglish Pretrained Models\",\n author = \"Blevins, Terra and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.233/\",\n doi = \"10.18653/v1/2022.emnlp-main.233\",\n pages = \"3563--3574\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.233.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.233/", + "pdf_size": 1779666, + "gs_citation": 73, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8900068312395248087&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + 
"aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington + Meta AI Research; Paul G. Allen School of Computer Science & Engineering, University of Washington + Meta AI Research", + "aff_domain": "cs.washington.edu;cs.washington.edu", + "email": "cs.washington.edu;cs.washington.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "University of Washington;Meta Platforms, Inc.", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;Meta AI Research", + "aff_unique_url": "https://www.washington.edu;https://meta.com", + "aff_unique_abbr": "UW;Meta AI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.161", + "title": "Language Model Decomposition: Quantifying the Dependency and Correlation of Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (LMs), such as BERT (Devlin et al., 2018) and its variants, have led to significant improvements on various NLP tasks in past years. However, a theoretical framework for studying their relationships is still missing. In this paper, we fill this gap by investigating the linear dependency between pre-trained LMs. The linear dependency of LMs is defined analogously to the linear dependency of vectors. We propose Language Model Decomposition (LMD) to represent a LM using a linear combination of other LMs as basis, and derive the closed-form solution. A goodness-of-fit metric for LMD similar to the coefficient of determination is defined and used to measure the linear dependency of a set of LMs. In experiments, we find that BERT and eleven (11) BERT-like LMs are 91% linearly dependent. This observation suggests that current state-of-the-art (SOTA) LMs are highly \u201ccorrelated\u201d. 
To further advance SOTA we need more diverse and novel LMs that are less dependent on existing LMs.", + "author": "Hao Zhang", + "authorids": "/h/hao-zhang/", + "bibtex": "@inproceedings{zhang-2022-language,\n title = \"Language Model Decomposition: Quantifying the Dependency and Correlation of Language Models\",\n author = \"Zhang, Hao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.161/\",\n doi = \"10.18653/v1/2022.emnlp-main.161\",\n pages = \"2508--2517\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.161.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.161/", + "pdf_size": 524969, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10462489659602278654&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Supportiv Inc, Berkeley, CA", + "aff_domain": "alumni.princeton.edu", + "email": "alumni.princeton.edu", + "github": "https://github.com/haozhg/lmd", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Supportiv Inc", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Berkeley", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.406", + "title": "Language Model Detoxification in Dialogue with Contextualized Stance Control", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To reduce the toxic degeneration in a pretrained Language Model (LM), previous work on Language Model detoxification has focused on reducing the toxicity of the generation itself (self-toxicity) without 
consideration of the context. As a result, a type of implicit offensive language where the generations support the offensive language in the context is ignored. Different from the LM controlling tasks in previous work, where the desired attributes are fixed for generation, the desired stance of the generation depends on the offensiveness of the context. Therefore, we propose a novel control method to do context-dependent detoxification with the stance taken into consideration. We introduce meta prefixes to learn the contextualized stance control strategy and to generate the stance control prefix according to the input context. The generated stance prefix is then combined with the toxicity control prefix to guide the response generation. Experimental results show that our proposed method can effectively learn the context-dependent stance control strategies while keeping a low self-toxicity of the underlying LM.", + "author": "Jing Qian; Xifeng Yan", + "authorids": "/j/jing-qian/; /x/xifeng-yan/", + "bibtex": "@inproceedings{qian-yan-2022-language,\n title = \"Language Model Detoxification in Dialogue with Contextualized Stance Control\",\n author = \"Qian, Jing and\n Yan, Xifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.406/\",\n doi = \"10.18653/v1/2022.findings-emnlp.406\",\n pages = \"5548--5558\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.406.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.406/", + "pdf_size": 561168, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3602844880180336623&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, 
University of California, Santa Barbara; Department of Computer Science, University of California, Santa Barbara", + "aff_domain": "cs.ucsb.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.ucsb.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of California, Santa Barbara", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.ucsb.edu", + "aff_unique_abbr": "UCSB", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Santa Barbara", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.96", + "title": "Language Model Pre-Training with Sparse Latent Typing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Modern large-scale Pre-trained Language Models (PLMs) have achieved tremendous success on a wide range of downstream tasks. However, most of the LM pre-training objectives only focus on text reconstruction, but have not sought to learn latent-level interpretable representations of sentences. In this paper, we manage to push the language models to obtain a deeper understanding of sentences by proposing a new pre-training objective, Sparse Latent Typing, which enables the model to sparsely extract sentence-level keywords with diverse latent types. Experimental results show that our model is able to learn interpretable latent type categories in a self-supervised manner without using any external knowledge. Besides, the language model pre-trained with such an objective also significantly improves Information Extraction related downstream tasks in both supervised and few-shot settings. 
Our code is publicly available at https://github.com/renll/SparseLT.", + "author": "Liliang Ren; Zixuan Zhang; Han Wang; Clare Voss; ChengXiang Zhai; Heng Ji", + "authorids": "/l/liliang-ren/; /z/zixuan-zhang/; /h/han-wang/; /c/clare-voss/; /c/chengxiang-zhai/; /h/heng-ji/", + "bibtex": "@inproceedings{ren-etal-2022-language,\n title = \"Language Model Pre-Training with Sparse Latent Typing\",\n author = \"Ren, Liliang and\n Zhang, Zixuan and\n Wang, Han and\n Voss, Clare and\n Zhai, ChengXiang and\n Ji, Heng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.96/\",\n doi = \"10.18653/v1/2022.emnlp-main.96\",\n pages = \"1480--1494\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.96.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.96/", + "pdf_size": 955639, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9431852004753411045&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; Amazon Alexa; US Army Research Laboratory; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;amazon.com;army.mil;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;amazon.com;army.mil;illinois.edu;illinois.edu", + "github": "https://github.com/renll/SparseLT", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Amazon;US Army Research Laboratory", + "aff_unique_dep": ";Amazon Alexa;", + "aff_unique_url": 
"https://illinois.edu;https://www.amazon.com/alexa;https://www.arl.army.mil", + "aff_unique_abbr": "UIUC;Amazon Alexa;ARL", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.64", + "title": "Language Models Are Poor Learners of Directional Inference", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We examine LMs\u2019 competence of directional predicate entailments by supervised fine-tuning with prompts. Our analysis shows that contrary to their apparent success on standard NLI, LMs show limited ability to learn such directional inference; moreover, existing datasets fail to test directionality, and/or are infested by artefacts that can be learnt as proxy for entailments, yielding over-optimistic results. In response, we present BoOQA (Boolean Open QA), a robust multi-lingual evaluation benchmark for directional predicate entailments, extrinsic to existing training sets. 
On BoOQA, we establish baselines and show evidence of existing LM-prompting models being incompetent directional entailment learners, in contrast to entailment graphs, however limited by sparsity.", + "author": "Tianyi Li; Mohammad Javad Hosseini; Sabine Weber; Mark Steedman", + "authorids": "/t/tianyi-li/; /m/mohammad-javad-hosseini/; /s/sabine-weber/; /m/mark-steedman/", + "bibtex": "@inproceedings{li-etal-2022-language,\n title = \"Language Models Are Poor Learners of Directional Inference\",\n author = \"Li, Tianyi and\n Hosseini, Mohammad Javad and\n Weber, Sabine and\n Steedman, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.64/\",\n doi = \"10.18653/v1/2022.findings-emnlp.64\",\n pages = \"903--921\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.64.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.64/", + "pdf_size": 776331, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=238866402261843844&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh + Google Research; School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh", + "aff_domain": "ed.ac.uk;ed.ac.uk;sms.ed.ac.uk;inf.ed.ac.uk", + "email": "ed.ac.uk;ed.ac.uk;sms.ed.ac.uk;inf.ed.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0+1;0;0", + "aff_unique_norm": "University of Edinburgh;Google", + "aff_unique_dep": "School of Informatics;Google Research", + "aff_unique_url": "https://www.ed.ac.uk;https://research.google", + "aff_unique_abbr": 
"Edinburgh;Google Research", + "aff_campus_unique_index": "0;0+1;0;0", + "aff_campus_unique": "Edinburgh;Mountain View", + "aff_country_unique_index": "0;0+1;0;0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.16", + "title": "Language Models Understand Us, Poorly", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Some claim language models understand us. Others won\u2019t hear it. To clarify, I investigate three views of human language understanding: as-mapping, as-reliability and as-representation. I argue that while behavioral reliability is necessary for understanding, internal representations are sufficient; they climb the right hill. I review state-of-the-art language and multi-modal models: they are pragmatically challenged by under-specification of form. I question the Scaling Paradigm: limits on resources may prohibit scaled-up models from approaching understanding. Last, I describe how as-representation advances a science of understanding. 
We need work which probes model internals, adds more of human language, and measures what models can learn.", + "author": "Jared Moore", + "authorids": "/j/jared-moore/", + "bibtex": "@inproceedings{moore-2022-language,\n title = \"Language Models Understand Us, Poorly\",\n author = \"Moore, Jared\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.16/\",\n doi = \"10.18653/v1/2022.findings-emnlp.16\",\n pages = \"214--222\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.16.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.16/", + "pdf_size": 156173, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17174472644985416257&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Washington School of Computer Science", + "aff_domain": "jaredmoore.org", + "email": "jaredmoore.org", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Washington", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.washington.edu", + "aff_unique_abbr": "UW", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.423", + "title": "Language Models as Agent Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language models (LMs) are trained on collections of documents, written by individual human agents to achieve specific goals in the outside world. 
During training, LMs have access only to text of these documents, with no direct evidence of the internal states of the agents that produced them\u2014a fact often used to argue that LMs are incapable of modeling goal-directed aspects of human language production and comprehension. Can LMs trained on text learn anything at all about the relationship between language and use? I argue that LMs are models of communicative intentions in a specific, narrow sense. When performing next word prediction given a textual context, an LM can infer and represent properties of an agent likely to have produced that context. These representations can in turn influence subsequent LM generation in the same way that agents\u2019 communicative intentions influence their language. I survey findings from the recent literature showing that\u2014even in today\u2019s non-robust and error-prone models\u2014LMs infer and use representations of fine-grained communicative intentions and high-level beliefs and goals. 
Despite the limited nature of their training data, they can thus serve as building blocks for systems that communicate and act intentionally.", + "author": "Jacob Andreas", + "authorids": "/j/jacob-andreas/", + "bibtex": "@inproceedings{andreas-2022-language,\n title = \"Language Models as Agent Models\",\n author = \"Andreas, Jacob\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.423/\",\n doi = \"10.18653/v1/2022.findings-emnlp.423\",\n pages = \"5769--5779\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.423.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.423/", + "pdf_size": 606736, + "gs_citation": 205, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13151043703595915923&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MIT", + "aff_domain": "mit.edu", + "email": "mit.edu", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Massachusetts Institute of Technology", + "aff_unique_dep": "", + "aff_unique_url": "https://web.mit.edu", + "aff_unique_abbr": "MIT", + "aff_country_unique_index": "0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.90", + "title": "Language Models of Code are Few-Shot Commonsense Learners", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We address the general task of structured commonsense reasoning: given a natural language input, the goal is to generate a graph such as an event or a reasoning-graph.To employ large language models (LMs) for this task, existing approaches \u2018serialize\u2019 the output graph as a flat list of nodes and edges.Although feasible, these 
serialized graphs strongly deviate from the natural language corpora that LMs were pre-trained on, hindering LMs from generating them correctly. In this paper, we show that when we instead frame structured commonsense reasoning tasks as code generation tasks, pre-trained LMs of code are better structured commonsense reasoners than LMs of natural language, even when the downstream task does not involve source code at all.We demonstrate our approach across three diverse structured commonsense reasoning tasks. In all these natural language tasks, we show that using our approach, a code generation LM (codex) outperforms natural-LMs that are fine-tuned on the target task (T5) and other strong LMs such as GPT-3 in the few-shot setting.", + "author": "Aman Madaan; Shuyan Zhou; Uri Alon; Yiming Yang; Graham Neubig", + "authorids": "/a/aman-madaan/; /s/shuyan-zhou/; /u/uri-alon/; /y/yiming-yang/; /g/graham-neubig/", + "bibtex": "@inproceedings{madaan-etal-2022-language,\n title = \"Language Models of Code are Few-Shot Commonsense Learners\",\n author = \"Madaan, Aman and\n Zhou, Shuyan and\n Alon, Uri and\n Yang, Yiming and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.90/\",\n doi = \"10.18653/v1/2022.emnlp-main.90\",\n pages = \"1384--1403\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.90.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.90/", + "pdf_size": 814724, + "gs_citation": 205, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8413916372764514138&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Language Technologies Institute, Carnegie Mellon University, USA+Inspired 
Cognition, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA+Inspired Cognition, USA", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/madaan/CoCoGen", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;0+1", + "aff_unique_norm": "Carnegie Mellon University;Inspired Cognition", + "aff_unique_dep": "Language Technologies Institute;", + "aff_unique_url": "https://www.cmu.edu;", + "aff_unique_abbr": "CMU;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.27", + "title": "Language Models that Seek for Knowledge: Modular Search & Generation for Dialogue and Prompt Completion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language models (LMs) have recently been shown to generate more factual responses by employing modularity (Zhou et al., 2022) in combination with retrieval (Adolphs et al., 2021). We extend the recent approach of Adolphs et al. (2021) to include internet search as a module. Our SeeKeR (Search engine->Knowledge->Response) method thus applies a single LM to three modular tasks in succession: search, generating knowledge, and generating a final response. We show that, when using SeeKeR as a dialogue model, it outperforms the state-of-the-art model BlenderBot 2 (Chen et al., 2021) on open-domain knowledge-grounded conversations for the same number of parameters, in terms of consistency, knowledge and per-turn engagingness. 
SeeKeR applied to topical prompt completions as a standard language model outperforms GPT2 (Radford et al., 2019) and GPT3 (Brown et al., 2020) in terms of factuality and topicality, despite GPT3 being a vastly larger model. Our code and models are made publicly available.", + "author": "Kurt Shuster; Mojtaba Komeili; Leonard Adolphs; Stephen Roller; Arthur Szlam; Jason Weston", + "authorids": "/k/kurt-shuster/; /m/mojtaba-komeili/; /l/leonard-adolphs/; /s/stephen-roller/; /a/arthur-szlam/; /j/jason-weston/", + "bibtex": "@inproceedings{shuster-etal-2022-language,\n title = \"Language Models that Seek for Knowledge: Modular Search {\\&} Generation for Dialogue and Prompt Completion\",\n author = \"Shuster, Kurt and\n Komeili, Mojtaba and\n Adolphs, Leonard and\n Roller, Stephen and\n Szlam, Arthur and\n Weston, Jason\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.27/\",\n doi = \"10.18653/v1/2022.findings-emnlp.27\",\n pages = \"373--393\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.27.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.27/", + "pdf_size": 3495642, + "gs_citation": 124, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5502337170161580823&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Meta AI; Meta AI; ETH Z\u00fcrich; Meta AI; Meta AI; Meta AI", + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "", + "project": "https://parl.ai/projects/seeker", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.;ETH Z\u00fcrich", + "aff_unique_dep": "Meta AI;", + "aff_unique_url": "https://meta.com;https://www.ethz.ch", + 
"aff_unique_abbr": "Meta;ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "United States;Switzerland" + }, + { + "id": "2022.findings-emnlp.271", + "title": "Language Prior Is Not the Only Shortcut: A Benchmark for Shortcut Learning in VQA", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Visual Question Answering (VQA) models are prone to learn the shortcut solution formed by dataset biases rather than the intended solution. To evaluate the VQA models\u2019 reasoning ability beyond shortcut learning, the VQA-CP v2 dataset introduces a distribution shift between the training and test set given a question type. In this way, the model cannot use the training set shortcut (from question type to answer) to perform well on the test set. However, VQA-CP v2 only considers one type of shortcut and thus still cannot guarantee that the model relies on the intended solution rather than a solution specific to this shortcut. To overcome this limitation, we propose a new dataset that considers varying types of shortcuts by constructing different distribution shifts in multiple OOD test sets. In addition, we overcome the three troubling practices in the use of VQA-CP v2, e.g., selecting models using OOD test sets, and further standardize OOD evaluation procedure. Our benchmark provides a more rigorous and comprehensive testbed for shortcut learning in VQA. We benchmark recent methods and find that methods specifically designed for particular shortcuts fail to simultaneously generalize to our varying OOD test sets. 
We also systematically study the varying shortcuts and provide several valuable findings, which may promote the exploration of shortcut learning in VQA.", + "author": "Qingyi Si; Fandong Meng; Mingyu Zheng; Zheng Lin; Yuanxin Liu; Peng Fu; Yanan Cao; Weiping Wang; Jie Zhou", + "authorids": "/q/qingyi-si/; /f/fandong-meng/; /m/mingyu-zheng/; /z/zheng-lin/; /y/yuanxin-liu/; /p/peng-fu/; /y/yanan-cao/; /w/weiping-wang/; /j/jie-zhou/", + "bibtex": "@inproceedings{si-etal-2022-language,\n title = \"Language Prior Is Not the Only Shortcut: A Benchmark for Shortcut Learning in {VQA}\",\n author = \"Si, Qingyi and\n Meng, Fandong and\n Zheng, Mingyu and\n Lin, Zheng and\n Liu, Yuanxin and\n Fu, Peng and\n Cao, Yanan and\n Wang, Weiping and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.271/\",\n doi = \"10.18653/v1/2022.findings-emnlp.271\",\n pages = \"3698--3712\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.271.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.271/", + "pdf_size": 8523395, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2105777161210243794&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "https://github.com/PhoebusSi/VQA-VS", + "project": "", + "author_num": 9 + }, + { + "id": "2022.findings-emnlp.123", + "title": "Language as a fingerprint: Self-supervised learning of user encodings using transformers", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The way we talk carries information about who we are. 
Demographics, personality, clinical conditions, political preferences influence what we speak about and how, suggesting that many individual attributes could be inferred from adequate encodings of linguistic behavior. Conversely, conditioning text representations on author attributes has been shown to improve model performance in many NLP tasks. Previous research on individual differences and language representations has mainly focused on predicting selected attributes from text, or on conditioning text representations on such attributes for author-based contextualization. Here, we present a self-supervised approach to learning language-based user encodings using transformers. Using a large corpus of Reddit submissions, we fine-tune DistilBERT on user-based triplet loss. We show that fine-tuned models can pick up on complex linguistic signatures of users, and that they are able to infer rich information about them. Through a series of intrinsic analyses and probing tasks, we provide evidence that fine-tuning enhances models\u2019 ability to abstract generalizable user information, which yields performance advantages for user-based downstream tasks. 
We discuss applications in language-based assessment and contextualized and personalized NLP.", + "author": "Roberta Rocca; Tal Yarkoni", + "authorids": "/r/roberta-rocca/; /t/tal-yarkoni/", + "bibtex": "@inproceedings{rocca-yarkoni-2022-language,\n title = \"Language as a fingerprint: Self-supervised learning of user encodings using transformers\",\n author = \"Rocca, Roberta and\n Yarkoni, Tal\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.123/\",\n doi = \"10.18653/v1/2022.findings-emnlp.123\",\n pages = \"1701--1714\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.123.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.123/", + "pdf_size": 885748, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12838560666961448135&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Aarhus University + University of Texas at Austin; University of Texas at Austin", + "aff_domain": "cas.au.dk;utexas.edu", + "email": "cas.au.dk;utexas.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Aarhus University;University of Texas at Austin", + "aff_unique_dep": ";", + "aff_unique_url": "https://au.dk;https://www.utexas.edu", + "aff_unique_abbr": "AU;UT Austin", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "Denmark;United States" + }, + { + "id": "2022.emnlp-main.669", + "title": "Large Dual Encoders Are Generalizable Retrievers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "It has been shown that dual encoders trained on one domain 
often fail to generalize to other domains for retrieval tasks. One widespread belief is that the bottleneck layer of a dual encoder, where the final score is simply a dot-product between a query vector and a passage vector, is too limited compared to models with fine-grained interactions between the query and the passage. In this paper, we challenge this belief by scaling up the size of the dual encoder model while keeping the bottleneck layer as a single dot-product with a fixed size. With multi-stage training, scaling up the model size brings significant improvement on a variety of retrieval tasks, especially for out-of-domain generalization. We further analyze the impact of the bottleneck layer and demonstrate diminishing improvement when scaling up the embedding size. Experimental results show that our dual encoders, Generalizable T5-based dense Retrievers (GTR), outperform previous sparse and dense retrievers on the BEIR dataset significantly. Most surprisingly, our ablation study finds that GTR is very data efficient, as it only needs 10% of MS Marco supervised data to match the out-of-domain performance of using all supervised data.", + "author": "Jianmo Ni; Chen Qu; Jing Lu; Zhuyun Dai; Gustavo Hernandez Abrego; Ji Ma; Vincent Zhao; Yi Luan; Keith Hall; Ming-Wei Chang; Yinfei Yang", + "authorids": "/j/jianmo-ni/; /c/chen-qu/; /j/jing-lu/; /z/zhuyun-dai/; /g/gustavo-hernandez-abrego/; /j/ji-ma/; /v/vincent-zhao/; /y/yi-luan/; /k/keith-hall/; /m/ming-wei-chang/; /y/yinfei-yang/", + "bibtex": "@inproceedings{ni-etal-2022-large,\n title = \"Large Dual Encoders Are Generalizable Retrievers\",\n author = \"Ni, Jianmo and\n Qu, Chen and\n Lu, Jing and\n Dai, Zhuyun and\n Hernandez Abrego, Gustavo and\n Ma, Ji and\n Zhao, Vincent and\n Luan, Yi and\n Hall, Keith and\n Chang, Ming-Wei and\n Yang, Yinfei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural 
Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.669/\",\n doi = \"10.18653/v1/2022.emnlp-main.669\",\n pages = \"9844--9855\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.669.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.669/", + "pdf_size": 664607, + "gs_citation": 418, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8936679817503595022&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;;;;", + "aff_domain": ";;;;;;;;;;", + "email": ";;;;;;;;;;", + "github": "", + "project": "", + "author_num": 11 + }, + { + "id": "2022.emnlp-main.130", + "title": "Large language models are few-shot clinical information extractors", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A long-running goal of the clinical NLP community is the extraction of important variables trapped in clinical notes. However, roadblocks have included dataset shift from the general domain and a lack of public clinical corpora and annotations. In this work, we show that large language models, such as InstructGPT (Ouyang et al., 2022), perform well at zero- and few-shot information extraction from clinical text despite not being trained specifically for the clinical domain. Whereas text classification and generation performance have already been studied extensively in such models, here we additionally demonstrate how to leverage them to tackle a diverse set of NLP tasks which require more structured outputs, including span identification, token-level sequence classification, and relation extraction. Further, due to the dearth of available data to evaluate these systems, we introduce new datasets for benchmarking few-shot clinical information extraction based on a manual re-annotation of the CASI dataset (Moon et al., 2014) for new tasks. 
On the clinical extraction tasks we studied, the GPT-3 systems significantly outperform existing zero- and few-shot baselines.", + "author": "Monica Agrawal; Stefan Hegselmann; Hunter Lang; Yoon Kim; David Sontag", + "authorids": "/m/monica-agrawal/; /s/stefan-hegselmann/; /h/hunter-lang/; /y/yoon-kim/; /d/david-sontag/", + "bibtex": "@inproceedings{agrawal-etal-2022-large,\n title = \"Large language models are few-shot clinical information extractors\",\n author = \"Agrawal, Monica and\n Hegselmann, Stefan and\n Lang, Hunter and\n Kim, Yoon and\n Sontag, David\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.130/\",\n doi = \"10.18653/v1/2022.emnlp-main.130\",\n pages = \"1998--2022\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.130.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.130/", + "pdf_size": 633987, + "gs_citation": 448, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16496590369024274230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "MIT CSAIL; University of M\u00fcnster; MIT CSAIL; MIT CSAIL; MIT CSAIL", + "aff_domain": "mit.edu;uni-muenster.de;mit.edu;mit.edu;mit.edu", + "email": "mit.edu;uni-muenster.de;mit.edu;mit.edu;mit.edu", + "github": "", + "project": "https://huggingface.co/datasets/mitclinicalml/clinical-ie", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;University of M\u00fcnster", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory;", + "aff_unique_url": "https://www.csail.mit.edu;https://www.uni-muenster.de", + "aff_unique_abbr": "MIT CSAIL;UM", + 
"aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "2022.findings-emnlp.484", + "title": "Large-Scale Differentially Private BERT", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this work, we study the large-scale pretraining of BERT-Large (Devlin et al., 2019) with differentially private SGD (DP-SGD). We show that combined with a careful implementation, scaling up the batch size to millions (i.e., mega-batches) improves the utility of the DP-SGD step for BERT; we also enhance the training efficiency by using an increasing batch size schedule. Our implementation builds on the recent work of Subramani et al (2020), who demonstrated that the overhead of a DP-SGD step is minimized with effective use of JAX (Bradbury et al., 2018; Frostig et al., 2018) primitives in conjunction with the XLA compiler (XLA team and collaborators, 2017). Our implementation achieves a masked language model accuracy of 60.5% at a batch size of 2M, for epsilon=5, which is a reasonable privacy setting. 
To put this number in perspective, non-private BERT models achieve an accuracy of \u223c70%.", + "author": "Rohan Anil; Badih Ghazi; Vineet Gupta; Ravi Kumar; Pasin Manurangsi", + "authorids": "/r/rohan-anil/; /b/badih-ghazi/; /v/vineet-gupta/; /r/ravi-kumar/; /p/pasin-manurangsi/", + "bibtex": "@inproceedings{anil-etal-2022-large,\n title = \"Large-Scale Differentially Private {BERT}\",\n author = \"Anil, Rohan and\n Ghazi, Badih and\n Gupta, Vineet and\n Kumar, Ravi and\n Manurangsi, Pasin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.484/\",\n doi = \"10.18653/v1/2022.findings-emnlp.484\",\n pages = \"6481--6491\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.484.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.484/", + "pdf_size": 732677, + "gs_citation": 162, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18318172964403360340&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Google; Google; Google; Google; Google", + "aff_domain": "google.com;gmail.com;google.com;gmail.com;google.com", + "email": "google.com;gmail.com;google.com;gmail.com;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "", + "aff_unique_url": "https://www.google.com", + "aff_unique_abbr": "Google", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.64", + "title": "Large-scale Machine Translation for Indian Languages in E-commerce under Low Resource Constraints", + 
"track": "main", + "status": "Industry", + "award": false, + "abstract": "The democratization of e-commerce platforms has moved an increasingly diversified Indian user base to shop online. We have deployed reliable and precise large-scale Machine Translation systems for several Indian regional languages in this work. Building such systems is a challenge because of the low-resource nature of the Indian languages. We develop a structured model development pipeline as a closed feedback loop with external manual feedback through an Active Learning component. We show strong synthetic parallel data generation capability and consistent improvements to the model over iterations. Starting with 1.2M parallel pairs for English-Hindi we have compiled a corpus with 400M+ synthetic high quality parallel pairs across different domains. Further, we need colloquial translations to preserve the intent and friendliness of English content in regional languages, and make it easier to understand for our users. We perform robust and effective domain adaptation steps to achieve colloquial such translations. Over iterations, we show 9.02 BLEU points improvement for English to Hindi translation model. 
Along with Hindi, we show that the overall approach and best practices extends well to other Indian languages, resulting in deployment of our models across 7 Indian Languages.", + "author": "Amey Patil; Nikesh Garera", + "authorids": "/a/amey-patil/; /n/nikesh-garera/", + "bibtex": "@inproceedings{patil-garera-2022-large,\n title = \"Large-scale Machine Translation for {I}ndian Languages in {E}-commerce under Low Resource Constraints\",\n author = \"Patil, Amey and\n Garera, Nikesh\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.64/\",\n doi = \"10.18653/v1/2022.emnlp-industry.64\",\n pages = \"627--634\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.64.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.64/", + "pdf_size": 727521, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11113618728633392461&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Flipkart; Flipkart", + "aff_domain": "flipkart.com;flipkart.com", + "email": "flipkart.com;flipkart.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Flipkart", + "aff_unique_dep": "", + "aff_unique_url": "https://www.flipkart.com", + "aff_unique_abbr": "Flipkart", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.659", + "title": "Late Fusion with Triplet Margin Objective for Multimodal Ideology Prediction and Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prior work on ideology prediction has largely focused on single modalities, 
i.e., text or images. In this work, we introduce the task of multimodal ideology prediction, where a model predicts binary or five-point scale ideological leanings, given a text-image pair with political content. We first collect five new large-scale datasets with English documents and images along with their ideological leanings, covering news articles from a wide range of mainstream media in US and social media posts from Reddit and Twitter. We conduct in-depth analyses on news articles and reveal differences in image content and usage across the political spectrum. Furthermore, we perform extensive experiments and ablation studies, demonstrating the effectiveness of targeted pretraining objectives on different model components. Our best-performing model, a late-fusion architecture pretrained with a triplet objective over multimodal content, outperforms the state-of-the-art text-only model by almost 4% and a strong multimodal baseline with no pretraining by over 3%.", + "author": "Changyuan Qiu; Winston Wu; Xinliang Frederick Zhang; Lu Wang", + "authorids": "/c/changyuan-qiu/; /w/winston-wu/; /x/xinliang-frederick-zhang/; /l/lu-wang/", + "bibtex": "@inproceedings{qiu-etal-2022-late,\n title = \"Late Fusion with Triplet Margin Objective for Multimodal Ideology Prediction and Analysis\",\n author = \"Qiu, Changyuan and\n Wu, Winston and\n Zhang, Xinliang Frederick and\n Wang, Lu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.659/\",\n doi = \"10.18653/v1/2022.emnlp-main.659\",\n pages = \"9720--9736\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.659.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.659/", + "pdf_size": 
781050, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17386249510038856453&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Computer Science and Engineering, University of Michigan; Computer Science and Engineering, University of Michigan; Computer Science and Engineering, University of Michigan; Computer Science and Engineering, University of Michigan", + "aff_domain": "umich.edu;umich.edu;umich.edu;umich.edu", + "email": "umich.edu;umich.edu;umich.edu;umich.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "Computer Science and Engineering", + "aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Ann Arbor", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.95", + "title": "Late Prompt Tuning: A Late Prompt Could Be Better Than Many Prompts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompt tuning is a parameter-efficient tuning (PETuning) method for utilizing pre-trained models (PTMs) that simply prepends a soft prompt to the input and only optimizes the prompt to adapt PTMs to downstream tasks. Although it is parameter- and deployment-efficient, its performance still lags behind other state-of-the-art PETuning methods. Besides, the training cost of prompt tuning is not significantly reduced due to the back-propagation through the entire model. Through empirical analyses, we shed some light on the lagging performance of prompt tuning and recognize a trade-off between the propagation distance from label signals to the inserted prompt and the influence of the prompt on model outputs. 
Further, we present Late Prompt Tuning (LPT) that inserts a late prompt into an intermediate layer of the PTM instead of the input layer or all layers. The late prompt is obtained by a neural prompt generator conditioned on the hidden states before the prompt insertion layer and therefore is instance-dependent. Through extensive experimental results across various tasks and PTMs, we show that LPT can achieve competitive performance to full model tuning and other PETuning methods under both full-data and few-shot scenarios while possessing faster training speed and lower memory cost.", + "author": "Xiangyang Liu; Tianxiang Sun; Xuanjing Huang; Xipeng Qiu", + "authorids": "/x/xiangyang-liu/; /t/tianxiang-sun/; /x/xuan-jing-huang/; /x/xipeng-qiu/", + "bibtex": "@inproceedings{liu-etal-2022-late,\n title = \"Late Prompt Tuning: A Late Prompt Could Be Better Than Many Prompts\",\n author = \"Liu, Xiangyang and\n Sun, Tianxiang and\n Huang, Xuanjing and\n Qiu, Xipeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.95/\",\n doi = \"10.18653/v1/2022.findings-emnlp.95\",\n pages = \"1325--1338\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.95.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.95/", + "pdf_size": 1743806, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13094583500062393867&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science, Fudan University+Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; School of Computer Science, Fudan University+Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; 
School of Computer Science, Fudan University+Shanghai Key Laboratory of Intelligent Information Processing, Fudan University; School of Computer Science, Fudan University+Shanghai Key Laboratory of Intelligent Information Processing, Fudan University", + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.369", + "title": "LawngNLI: A Long-Premise Benchmark for In-Domain Generalization from Short to Long Contexts and for Implication-Based Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural language inference has trended toward studying contexts beyond the sentence level. An important application area is law: past cases often do not foretell how they apply to new situations and implications must be inferred. This paper introduces LawngNLI, constructed from U.S. legal opinions with automatic labels with high human-validated accuracy. Premises are long and multigranular. Experiments show two use cases. First, LawngNLI can benchmark for in-domain generalization from short to long contexts. It has remained unclear if large-scale long-premise NLI datasets actually need to be constructed: near-top performance on long premises could be achievable by fine-tuning using short premises. Without multigranularity, benchmarks cannot distinguish lack of fine-tuning on long premises versus domain shift between short and long datasets. In contrast, our long and short premises share the same examples and domain. 
Models fine-tuned using several past NLI datasets and/or our short premises fall short of top performance on our long premises. So for at least certain domains (such as ours), large-scale long-premise datasets are needed. Second, LawngNLI can benchmark for implication-based retrieval. Queries are entailed or contradicted by target documents, allowing users to move between arguments and evidence. Leading retrieval models perform reasonably zero shot on a LawngNLI-derived retrieval task. We compare different systems for re-ranking, including lexical overlap and cross-encoders fine-tuned using a modified LawngNLI or past NLI datasets. LawngNLI can train and test systems for implication-based case retrieval and argumentation.", + "author": "William Bruno; Dan Roth", + "authorids": "/w/william-bruno/; /d/dan-roth/", + "bibtex": "@inproceedings{bruno-roth-2022-lawngnli,\n title = \"{L}awng{NLI}: A Long-Premise Benchmark for In-Domain Generalization from Short to Long Contexts and for Implication-Based Retrieval\",\n author = \"Bruno, William and\n Roth, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.369/\",\n doi = \"10.18653/v1/2022.findings-emnlp.369\",\n pages = \"5019--5043\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.369.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.369/", + "pdf_size": 416515, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16713560177516549552&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "University of Pennsylvania; University of Pennsylvania", + "aff_domain": "seas.upenn.edu;seas.upenn.edu", + "email": "seas.upenn.edu;seas.upenn.edu", + 
"github": "", + "project": "http://cogcomp.org/page/publication_view/990", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.364", + "title": "Learn What Is Possible, Then Choose What Is Best: Disentangling One-To-Many Relations in Language Through Text-based Games", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language models pre-trained on large self-supervised corpora, followed by task-specific fine-tuning has become the dominant paradigm in NLP. These pre-training datasets often have a one-to-many structure\u2014e.g. in dialogue there are many valid responses for a given context. However, only some of these responses will be desirable in our downstream task. This raises the question of how we should train the model such that it can emulate the desirable behaviours, but not the undesirable ones. Current approaches train in a one-to-one setup\u2014only a single target response is given for a single dialogue context\u2014leading to models only learning to predict the average response, while ignoring the full range of possible responses. Using text-based games as a testbed, our approach, PASA, uses discrete latent variables to capture the range of different behaviours represented in our larger pre-training dataset. We then use knowledge distillation to distil the posterior probability distribution into a student model. This probability distribution is far richer than learning from only the hard targets of the dataset, and thus allows the student model to benefit from the richer range of actions the teacher model has learned. 
Results show up to 49% empirical improvement over the previous state-of-the-art model on the Jericho Walkthroughs dataset.", + "author": "Benjamin Towle; Ke Zhou", + "authorids": "/b/benjamin-towle/; /k/ke-zhou/", + "bibtex": "@inproceedings{towle-zhou-2022-learn,\n title = \"Learn What Is Possible, Then Choose What Is Best: Disentangling One-To-Many Relations in Language Through Text-based Games\",\n author = \"Towle, Benjamin and\n Zhou, Ke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.364/\",\n doi = \"10.18653/v1/2022.findings-emnlp.364\",\n pages = \"4955--4965\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.364.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.364/", + "pdf_size": 662914, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7909240534612559871&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Nottingham; University of Nottingham + Nokia Bell Labs", + "aff_domain": "nottingham.ac.uk;nottingham.ac.uk", + "email": "nottingham.ac.uk;nottingham.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "University of Nottingham;Nokia Bell Labs", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nottingham.ac.uk;https://www.nokialabs.com", + "aff_unique_abbr": "UoN;Nokia Bell Labs", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.436", + "title": "Learning Action-Effect Dynamics for Hypothetical Vision-Language Reasoning Task", + "track": "main", + 
"status": "finding", + "award": false, + "abstract": "\u2018Actions\u2019 play a vital role in how humans interact with the world. Thus, autonomous agents that would assist us in everyday tasks also require the capability to perform \u2018Reasoning about Actions & Change\u2019 (RAC). This has been an important research direction in Artificial Intelligence (AI) in general, but the study of RAC with visual and linguistic inputs is relatively recent. The CLEVR_HYP (Sampat et. al., 2021) is one such testbed for hypothetical vision-language reasoning with actions as the key focus. In this work, we propose a novel learning strategy that can improve reasoning about the effects of actions. We implement an encoder-decoder architecture to learn the representation of actions as vectors. We combine the aforementioned encoder-decoder architecture with existing modality parsers and a scene graph question answering model to evaluate our proposed system on the CLEVR_HYP dataset. We conduct thorough experiments to demonstrate the effectiveness of our proposed approach and discuss its advantages over previous baselines in terms of performance, data efficiency, and generalization capability.", + "author": "Shailaja Keyur Sampat; Pratyay Banerjee; Yezhou Yang; Chitta Baral", + "authorids": "/s/shailaja-keyur-sampat/; /p/pratyay-banerjee/; /y/yezhou-yang/; /c/chitta-baral/", + "bibtex": "@inproceedings{sampat-etal-2022-learning,\n title = \"Learning Action-Effect Dynamics for Hypothetical Vision-Language Reasoning Task\",\n author = \"Sampat, Shailaja Keyur and\n Banerjee, Pratyay and\n Yang, Yezhou and\n Baral, Chitta\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.436/\",\n 
doi = \"10.18653/v1/2022.findings-emnlp.436\",\n pages = \"5914--5924\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.436.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.436/", + "pdf_size": 8326253, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17375642748151642183&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Arizona State University, USA; Arizona State University, USA; Arizona State University, USA; Arizona State University, USA", + "aff_domain": "asu.edu;asu.edu;asu.edu;asu.edu", + "email": "asu.edu;asu.edu;asu.edu;asu.edu", + "github": "https://github.com/shailaja183/ARL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.243", + "title": "Learning Cooperative Interactions for Multi-Overlap Aspect Sentiment Triplet Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Aspect sentiment triplet extraction (ASTE) is an essential task, which aims to extract triplets(aspect, opinion, sentiment). However, overlapped triplets, especially multi-overlap triplets,make ASTE a challenge. Most existing methods suffer from multi-overlap triplets becausethey focus on the single interactions between an aspect and an opinion. To solve the aboveissues, we propose a novel multi-overlap triplet extraction method, which decodes the complexrelations between multiple aspects and opinions by learning their cooperative interactions. Overall, the method is based on an encoder-decoder architecture. 
During decoding, we design ajoint decoding mechanism, which employs a multi-channel strategy to generate aspects andopinions through the cooperative interactions between them jointly. Furthermore, we constructa correlation-enhanced network to reinforce the interactions between related aspectsand opinions for sentiment prediction. Besides, a relation-wise calibration scheme is adoptedto further improve performance. Experiments show that our method outperforms baselines,especially multi-overlap triplets.", + "author": "Shiman Zhao; Wei Chen; Tengjiao Wang", + "authorids": "/s/shiman-zhao/; /w/wei-chen/; /t/tengjiao-wang/", + "bibtex": "@inproceedings{zhao-etal-2022-learning-cooperative,\n title = \"Learning Cooperative Interactions for Multi-Overlap Aspect Sentiment Triplet Extraction\",\n author = \"Zhao, Shiman and\n Chen, Wei and\n Wang, Tengjiao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.243/\",\n doi = \"10.18653/v1/2022.findings-emnlp.243\",\n pages = \"3337--3347\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.243.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.243/", + "pdf_size": 447045, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12239656275946898064&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science, Peking University, Beijing, China+Research Center for Computational Social Science, Peking University+Institute of Computational Social Science, Peking University (Qingdao); School of Computer Science, Peking University, Beijing, China+Research Center for Computational Social Science, Peking University+Institute of Computational Social 
Science, Peking University (Qingdao); School of Computer Science, Peking University, Beijing, China+Research Center for Computational Social Science, Peking University+Institute of Computational Social Science, Peking University (Qingdao)", + "aff_domain": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "stu.pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0+0;0+0+0;0+0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "School of Computer Science", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "0+2;0+2;0+2", + "aff_campus_unique": "Beijing;;Qingdao", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.634", + "title": "Learning Cross-Task Dependencies for Joint Extraction of Entities, Events, Event Arguments, and Relations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Extracting entities, events, event arguments, and relations (i.e., task instances) from text represents four main challenging tasks in information extraction (IE), which have been solved jointly (JointIE) to boost the overall performance for IE. As such, previous work often leverages two types of dependencies between the tasks, i.e., cross-instance and cross-type dependencies representing relatedness between task instances and correlations between information types of the tasks. However, the cross-task dependencies in prior work are not optimal as they are only designed manually according to some task heuristics. To address this issue, we propose a novel model for JointIE that aims to learn cross-task dependencies from data. In particular, we treat each task instance as a node in a dependency graph where edges between the instances are inferred through information from different layers of a pretrained language model (e.g., BERT). 
Furthermore, we utilize the Chow-Liu algorithm to learn a dependency tree between information types for JointIE by seeking to approximate the joint distribution of the types from data. Finally, the Chow-Liu dependency tree is used to generate cross-type patterns, serving as anchor knowledge to guide the learning of representations and dependencies between instances for JointIE. Experimental results show that our proposed model significantly outperforms strong JointIE baselines over four datasets with different languages.", + "author": "Minh Van Nguyen; Bonan Min; Franck Dernoncourt; Thien Nguyen", + "authorids": "/m/minh-van-nguyen/; /b/bonan-min/; /f/franck-dernoncourt/; /t/thien-nguyen/", + "bibtex": "@inproceedings{nguyen-etal-2022-learning,\n title = \"Learning Cross-Task Dependencies for Joint Extraction of Entities, Events, Event Arguments, and Relations\",\n author = \"Nguyen, Minh Van and\n Min, Bonan and\n Dernoncourt, Franck and\n Nguyen, Thien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.634/\",\n doi = \"10.18653/v1/2022.emnlp-main.634\",\n pages = \"9349--9360\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.634.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.634/", + "pdf_size": 452842, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17398199785419376457&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Dept. of Computer Science, University of Oregon, Eugene, OR, USA+VinAI Research, Vietnam; Amazon AWS AI Labs; Adobe Research, Seattle, WA, USA; Dept. 
of Computer Science, University of Oregon, Eugene, OR, USA+VinAI Research, Vietnam", + "aff_domain": "cs.uoregon.edu;amazon.com;adobe.com;cs.uoregon.edu", + "email": "cs.uoregon.edu;amazon.com;adobe.com;cs.uoregon.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;3;0+1", + "aff_unique_norm": "University of Oregon;VinAI Research;Amazon;Adobe Research", + "aff_unique_dep": "Department of Computer Science;;AWS AI Labs;", + "aff_unique_url": "https://www.uoregon.edu;https://www.vin.ai;https://aws.amazon.com;https://research.adobe.com", + "aff_unique_abbr": "UO;VinAI;Amazon;Adobe", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Eugene;;Seattle", + "aff_country_unique_index": "0+1;0;0;0+1", + "aff_country_unique": "United States;Vietnam" + }, + { + "id": "2022.findings-emnlp.309", + "title": "Learning From the Source Document: Unsupervised Abstractive Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Most of the state-of-the-art methods for abstractive text summarization are under supervised learning settings, while heavily relying on high-quality and large-scale parallel corpora. In this paper, we remove the need for reference summaries and present an unsupervised learning method SCR (Summarize, Contrast and Review) for abstractive summarization, which leverages contrastive learning and is the first work to apply contrastive learning for unsupervised abstractive summarization. Particularly, we use the true source documents as positive source document examples, and strategically generated fake source documents as negative source document examples to train the model to generate good summaries. Furthermore, we consider and improve the writing quality of the generated summaries by guiding them to be similar to human-written texts. 
The promising results on extensive experiments show that SCR outperforms other unsupervised abstractive summarization baselines, which demonstrates its effectiveness.", + "author": "Haojie Zhuang; Wei Emma Zhang; Jian Yang; Congbo Ma; Yutong Qu; Quan Z. Sheng", + "authorids": "/h/haojie-zhuang/; /w/wei-emma-zhang/; /j/jian-yang/; /c/congbo-ma/; /y/yutong-qu/; /q/quan-z-sheng/", + "bibtex": "@inproceedings{zhuang-etal-2022-learning,\n title = \"Learning From the Source Document: Unsupervised Abstractive Summarization\",\n author = \"Zhuang, Haojie and\n Zhang, Wei Emma and\n Yang, Jian and\n Ma, Congbo and\n Qu, Yutong and\n Sheng, Quan Z.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.309/\",\n doi = \"10.18653/v1/2022.findings-emnlp.309\",\n pages = \"4194--4205\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.309.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.309/", + "pdf_size": 294464, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12352834968521986952&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "The University of Adelaide, Adelaide, Australia; The University of Adelaide, Adelaide, Australia; Macquarie University, Sydney, Australia; The University of Adelaide, Adelaide, Australia; The University of Adelaide, Adelaide, Australia; Macquarie University, Sydney, Australia", + "aff_domain": "adelaide.edu.au;adelaide.edu.au;mq.edu.au;adelaide.edu.au;adelaide.edu.au;mq.edu.au", + "email": "adelaide.edu.au;adelaide.edu.au;mq.edu.au;adelaide.edu.au;adelaide.edu.au;mq.edu.au", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;1", + 
"aff_unique_norm": "The University of Adelaide;Macquarie University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.adelaide.edu.au;https://www.mq.edu.au", + "aff_unique_abbr": "Adelaide;MQ", + "aff_campus_unique_index": "0;0;1;0;0;1", + "aff_campus_unique": "Adelaide;Sydney", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "2022.emnlp-industry.33", + "title": "Learning Geolocations for Cold-Start and Hard-to-Resolve Addresses via Deep Metric Learning", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "With evergrowing digital adoption in the society and increasing demand for businesses to deliver to customers doorstep, the last mile hop of transportation planning poses unique challenges in emerging geographies with unstructured addresses. One of the crucial inputs to facilitate effective planning is the task of geolocating customer addresses. Existing systems operate by aggregating historical delivery locations or by resolving/matching addresses to known buildings and campuses to vend a high-precision geolocation. However, by design they fail to cater to a significant fraction of addresses which are new in the system and have inaccurate or missing building level information. We propose a framework to resolve these addresses (referred to as hard-to-resolve henceforth) to a shallower granularity termed as neighbourhood. Specifically, we propose a weakly supervised deep metric learning model to encode the geospatial semantics in address embeddings. 
We present empirical evaluation on India (IN) and the United Arab Emirates (UAE) hard-to-resolve addresses to show significant improvements in learning geolocations i.e., 22% (IN) & 55% (UAE) reduction in delivery defects (where learnt geocode is Y meters away from actual location), and 43% (IN) & 90% (UAE) reduction in 50th percentile (p50) distance between learnt and actual delivery locations over the existing production system.", + "author": "Govind; Saurabh Sohoney", + "authorids": "/g/govind-kothari/; /s/saurabh-sohoney/", + "bibtex": "@inproceedings{govind-sohoney-2022-learning,\n title = \"Learning Geolocations for Cold-Start and Hard-to-Resolve Addresses via Deep Metric Learning\",\n author = \"Govind and\n Sohoney, Saurabh\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.33/\",\n doi = \"10.18653/v1/2022.emnlp-industry.33\",\n pages = \"322--331\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.33.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.33/", + "pdf_size": 4958971, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6061838632429411907&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Amazon; Amazon", + "aff_domain": "amazon.com;amazon.com", + "email": "amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.105", 
+ "title": "Learning Instructions with Unlabeled Data for Zero-Shot Cross-Task Generalization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Training language models to learn from human instructions for zero-shot cross-task generalization has attracted much attention in NLP communities. Recently, instruction tuning (IT), which fine-tunes a pre-trained language model on a massive collection of tasks described via human-craft instructions, has been shown effective in instruction learning for unseen tasks. However, IT relies on a large amount of human-annotated samples, which restricts its generalization. Unlike labeled data, unlabeled data are often massive and cheap to obtain. In this work, we study how IT can be improved with unlabeled data. We first empirically explore the IT performance trends versus the number of labeled data, instructions, and training tasks. We find it critical to enlarge the number of training instructions, and the instructions can be underutilized due to the scarcity of labeled data. Then, we propose Unlabeled Data Augmented Instruction Tuning (UDIT) to take better advantage of the instructions during IT by constructing pseudo-labeled data from unlabeled plain texts. We conduct extensive experiments to show UDIT\u2019s effectiveness in various scenarios of tasks and datasets. We also comprehensively analyze the key factors of UDIT to investigate how to better improve IT with unlabeled data. 
The code is publicly available at https://github.com/thu-coai/UDIT.", + "author": "Yuxian Gu; Pei Ke; Xiaoyan Zhu; Minlie Huang", + "authorids": "/y/yuxian-gu/; /p/pei-ke/; /x/xiaoyan-zhu/; /m/minlie-huang/", + "bibtex": "@inproceedings{gu-etal-2022-learning,\n title = \"Learning Instructions with Unlabeled Data for Zero-Shot Cross-Task Generalization\",\n author = \"Gu, Yuxian and\n Ke, Pei and\n Zhu, Xiaoyan and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.105/\",\n doi = \"10.18653/v1/2022.emnlp-main.105\",\n pages = \"1617--1634\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.105.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.105/", + "pdf_size": 609202, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1234941901748814799&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "The CoAI group, Tsinghua University, Beijing, China; The CoAI group, Tsinghua University, Beijing, China; The CoAI group, Tsinghua University, Beijing, China; Institute for Artificial Intelligence, State Key Lab of Intelligent Technology and Systems, Beijing National Research Center for Information Science and Technology, Department of Computer Science and Technology, Tsinghua University, Beijing, China", + "aff_domain": "mails.tsinghua.edu.cn;outlook.com;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;outlook.com;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/thu-coai/UDIT", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Tsinghua University", + "aff_unique_dep": "The CoAI group", + 
"aff_unique_url": "https://www.tsinghua.edu.cn", + "aff_unique_abbr": "THU", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.524", + "title": "Learning Inter-Entity-Interaction for Few-Shot Knowledge Graph Completion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few-shot knowledge graph completion (FKGC) aims to infer unknown fact triples of a relation using its few-shot reference entity pairs. Recent FKGC studies focus on learning semantic representations of entity pairs by separately encoding the neighborhoods of head and tail entities. Such practice, however, ignores the inter-entity interaction, resulting in low-discrimination representations for entity pairs, especially when these entity pairs are associated with 1-to-N, N-to-1, and N-to-N relations. To address this issue, this paper proposes a novel FKGC model, named Cross-Interaction Attention Network (CIAN) to investigate the inter-entity interaction between head and tail entities. Specifically, we first explore the interactions within entities by computing the attention between the task relation and each entity neighbor, and then model the interactions between head and tail entities by letting an entity to attend to the neighborhood of its paired entity. In this way, CIAN can figure out the relevant semantics between head and tail entities, thereby generating more discriminative representations for entity pairs. Extensive experiments on two public datasets show that CIAN outperforms several state-of-the-art methods. 
The source code is available at https://github.com/cjlyl/FKGC-CIAN.", + "author": "Yuling Li; Kui Yu; Xiaoling Huang; Yuhong Zhang", + "authorids": "/y/yuling-li/; /k/kui-yu/; /x/xiaoling-huang/; /y/yuhong-zhang/", + "bibtex": "@inproceedings{li-etal-2022-learning-inter,\n title = \"Learning Inter-Entity-Interaction for Few-Shot Knowledge Graph Completion\",\n author = \"Li, Yuling and\n Yu, Kui and\n Huang, Xiaoling and\n Zhang, Yuhong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.524/\",\n doi = \"10.18653/v1/2022.emnlp-main.524\",\n pages = \"7691--7700\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.524.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.524/", + "pdf_size": 5537787, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=669260403379740806&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, Hefei, China+School of Computer Science and Information Enginerring, Hefei University of Technology; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, Hefei, China+School of Computer Science and Information Enginerring, Hefei University of Technology; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, Hefei, China+School of Computer Science and Information Enginerring, Hefei University of Technology; Key Laboratory of Knowledge Engineering with Big Data, Ministry of Education, Hefei, China+School of Computer Science and Information Enginerring, Hefei University of Technology", + "aff_domain": 
"mail.hfut.edu.cn;hfut.edu.cn;chzu.edu.cn;hfut.edu.cn", + "email": "mail.hfut.edu.cn;hfut.edu.cn;chzu.edu.cn;hfut.edu.cn", + "github": "https://github.com/cjlyl/FKGC-CIAN", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;0+1", + "aff_unique_norm": "Key Laboratory of Knowledge Engineering with Big Data;Hefei University of Technology", + "aff_unique_dep": "Ministry of Education;School of Computer Science and Information Engineering", + "aff_unique_url": ";http://www.hfut.edu.cn/", + "aff_unique_abbr": ";HUT", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Hefei", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.241", + "title": "Learning Invariant Representation Improves Robustness for MRC Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The prosperity of Pretrained Language Models(PLM) has greatly promoted the development of Machine Reading Comprehension (MRC). However, these models are vulnerable and not robust to adversarial examples. In this paper, we propose Stable and Contrastive Question Answering (SCQA) to improve invariance of representation to alleviate these robustness issues. Specifically, we first construct positive example pairs which have same answer through data augmentation. Then SCQA learns enhanced representations with better alignment between positive pairs by introducing stability and contrastive loss. 
Experimental results show that our approach can boost the robustness of QA models cross different MRC tasks and attack sets significantly and consistently.", + "author": "Yu Hai; Liang Wen; Haoran Meng; Tianyu Liu; Houfeng Wang", + "authorids": "/y/yu-hai/; /l/liang-wen/; /h/haoran-meng/; /t/tianyu-liu/; /h/houfeng-wang/", + "bibtex": "@inproceedings{hai-etal-2022-learning,\n title = \"Learning Invariant Representation Improves Robustness for {MRC} Models\",\n author = \"Hai, Yu and\n Wen, Liang and\n Meng, Haoran and\n Liu, Tianyu and\n Wang, Houfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.241/\",\n doi = \"10.18653/v1/2022.findings-emnlp.241\",\n pages = \"3306--3314\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.241.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.241/", + "pdf_size": 349471, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17031989240407355288&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China; Tencent Cloud Xiaowei; MOE Key Laboratory of Computational Linguistics, Peking University, China", + "aff_domain": "pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;tencent.com;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;stu.pku.edu.cn;tencent.com;pku.edu.cn", + "github": "https://github.com/haiahaiah/SCQA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Peking University;Tencent", + "aff_unique_dep": 
"MOE Key Laboratory of Computational Linguistics;Tencent Cloud Xiaowei", + "aff_unique_url": "http://www.pku.edu.cn;https://cloud.tencent.com", + "aff_unique_abbr": "PKU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.109", + "title": "Learning Label Modular Prompts for Text Classification in the Wild", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Machine learning models usually assume i.i.d data during training and testing, but data and tasks in real world often change over time. To emulate the transient nature of real world, we propose a challenging but practical task: text classification in-the-wild, which introduces different non-stationary training/testing stages. Decomposing a complex task into modular components can enable robust generalisation under such non-stationary environment. However, current modular approaches in NLP do not take advantage of recent advances in parameter efficient tuning of pretrained language models. To close this gap, we propose ModularPrompt, a label-modular prompt tuning framework for text classification tasks. In ModularPrompt, the input prompt consists of a sequence of soft label prompts, each encoding modular knowledge related to the corresponding class label. In two of most formidable settings, ModularPrompt outperforms relevant baselines by a large margin demonstrating strong generalisation ability. We also conduct comprehensive analysis to validate whether the learned prompts satisfy properties of a modular representation.", + "author": "Hailin Chen; Amrita Saha; Shafiq Joty; Steven C.H. 
Hoi", + "authorids": "/h/hailin-chen/; /a/amrita-saha/; /s/shafiq-joty/; /s/steven-c-h-hoi/", + "bibtex": "@inproceedings{chen-etal-2022-learning-label,\n title = \"Learning Label Modular Prompts for Text Classification in the Wild\",\n author = \"Chen, Hailin and\n Saha, Amrita and\n Joty, Shafiq and\n Hoi, Steven C.H.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.109/\",\n doi = \"10.18653/v1/2022.emnlp-main.109\",\n pages = \"1677--1690\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.109.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.109/", + "pdf_size": 1206519, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2670274618204940486&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Nanyang Technological University, Singapore+Salesforce Research; Salesforce Research; Nanyang Technological University, Singapore+Salesforce Research; Salesforce Research", + "aff_domain": "ntu.edu.sg;salesforce.com;ntu.edu.sg;salesforce.com", + "email": "ntu.edu.sg;salesforce.com;ntu.edu.sg;salesforce.com", + "github": "https://github.com/salesforce/ModularPrompt", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;0+1;1", + "aff_unique_norm": "Nanyang Technological University;Salesforce", + "aff_unique_dep": ";Salesforce Research", + "aff_unique_url": "https://www.ntu.edu.sg;https://research.salesforce.com", + "aff_unique_abbr": "NTU;Salesforce", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;0+1;1", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "2022.emnlp-main.420", + "title": "Learning Robust 
Representations for Continual Relation Extraction via Adversarial Class Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Continual relation extraction (CRE) aims to continually learn new relations from a class-incremental data stream. CRE model usually suffers from catastrophic forgetting problem, i.e., the performance of old relations seriously degrades when the model learns new relations. Most previous work attributes catastrophic forgetting to the corruption of the learned representations as new relations come, with an implicit assumption that the CRE models have adequately learned the old relations. In this paper, through empirical studies we argue that this assumption may not hold, and an important reason for catastrophic forgetting is that the learned representations do not have good robustness against the appearance of analogous relations in the subsequent learning process. To address this issue, we encourage the model to learn more precise and robust representations through a simple yet effective adversarial class augmentation mechanism (ACA), which is easy to implement and model-agnostic.Experimental results show that ACA can consistently improve the performance of state-of-the-art CRE models on two popular benchmarks.", + "author": "Peiyi Wang; Yifan Song; Tianyu Liu; Binghuai Lin; Yunbo Cao; Sujian Li; Zhifang Sui", + "authorids": "/p/peiyi-wang/; /y/yifan-song/; /t/tianyu-liu/; /b/binghuai-lin/; /y/yunbo-cao/; /s/sujian-li/; /z/zhifang-sui/", + "bibtex": "@inproceedings{wang-etal-2022-learning-robust,\n title = \"Learning Robust Representations for Continual Relation Extraction via Adversarial Class Augmentation\",\n author = \"Wang, Peiyi and\n Song, Yifan and\n Liu, Tianyu and\n Lin, Binghuai and\n Cao, Yunbo and\n Li, Sujian and\n Sui, Zhifang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language 
Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.420/\",\n doi = \"10.18653/v1/2022.emnlp-main.420\",\n pages = \"6264--6278\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.420.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.420/", + "pdf_size": 959778, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12987406932131523565&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; MOE Key Laboratory of Computational Linguistics, Peking University, China; MOE Key Laboratory of Computational Linguistics, Peking University, China", + "aff_domain": "gmail.com;pku.edu.cn;pku.edu.cn;pku.edu.cn;tencent.com;tencent.com;tencent.com", + "email": "gmail.com;pku.edu.cn;pku.edu.cn;pku.edu.cn;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/Wangpeiyi9979/ACA", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;1;1;0;0", + "aff_unique_norm": "Peking University;Tencent", + "aff_unique_dep": "MOE Key Laboratory of Computational Linguistics;Tencent Cloud Xiaowei", + "aff_unique_url": "http://www.pku.edu.cn;https://cloud.tencent.com", + "aff_unique_abbr": "PKU;Tencent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.328", + "title": "Learning Semantic Textual Similarity via Topic-informed Discrete Latent Variables", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, discrete latent variable models have received a surge of interest in both Natural Language 
Processing (NLP) and Computer Vision (CV), attributed to their comparable performance to the continuous counterparts in representation learning, while being more interpretable in their predictions. In this paper, we develop a topic-informed discrete latent variable model for semantic textual similarity, which learns a shared latent space for sentence-pair representation via vector quantization. Compared with previous models limited to local semantic contexts, our model can explore richer semantic information via topic modeling. We further boost the performance of semantic similarity by injecting the quantized representation into a transformer-based language model with a well-designed semantic-driven attention mechanism. We demonstrate, through extensive experiments across various English language datasets, that our model is able to surpass several strong neural baselines in semantic textual similarity tasks.", + "author": "Erxin Yu; Lan Du; Yuan Jin; Zhepei Wei; Yi Chang", + "authorids": "/e/erxin-yu/; /l/lan-du/; /y/yuan-jin/; /z/zhepei-wei/; /y/yi-chang/", + "bibtex": "@inproceedings{yu-etal-2022-learning,\n title = \"Learning Semantic Textual Similarity via Topic-informed Discrete Latent Variables\",\n author = \"Yu, Erxin and\n Du, Lan and\n Jin, Yuan and\n Wei, Zhepei and\n Chang, Yi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.328/\",\n doi = \"10.18653/v1/2022.emnlp-main.328\",\n pages = \"4937--4948\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.328.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.328/", + "pdf_size": 554862, + "gs_citation": 8, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=2863436362769036314&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future Science, Jilin University; Faculty of Information Technology, Monash University, Australia; Faculty of Information Technology, Monash University, Australia; School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future Science, Jilin University; School of Artificial Intelligence, Jilin University + Key Laboratory of Symbolic Computation and Knowledge Engineering, Jilin University + International Center of Future Science, Jilin University", + "aff_domain": "outlook.com;monash.edu;monash.edu;mails.jlu.edu.cn;jlu.edu.cn", + "email": "outlook.com;monash.edu;monash.edu;mails.jlu.edu.cn;jlu.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+0;1;1;0+0+0;0+0+0", + "aff_unique_norm": "Jilin University;Monash University", + "aff_unique_dep": "School of Artificial Intelligence;Faculty of Information Technology", + "aff_unique_url": "http://www.jlu.edu.cn;https://www.monash.edu", + "aff_unique_abbr": "JLU;Monash", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;1;1;0+0+0;0+0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.findings-emnlp.225", + "title": "Learning When and What to Quote: A Quotation Recommender System with Mutual Promotion of Recommendation and Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This work extends the current quotation recommendation task to a more realistic quotation recommender system that learns to predict when to quote and what to quote jointly. 
The system consists of three modules (tasks), a prediction module to predict whether to quote given conversation contexts, a recommendation module to recommend suitable quotations and a generation module generating quotations or sentences in ordinary language to continue the conversation. We benchmark several competitive models for the two newly introduced tasks (i.e., when-to-quote and what-to-continue). For quotation recommendation, compared with previous work that is either generation-based or ranking-based recommendation, we propose a novel framework with mutual promotion of generation module and ranking-based recommendation module. Experiments show that our framework achieves significantly better performance than baselines on two datasets. Further experiments and analyses validate the effectiveness of the proposed mechanisms and get a better understanding of the quotation recommendation task.", + "author": "Lingzhi Wang; Xingshan Zeng; Kam-Fai Wong", + "authorids": "/l/lingzhi-wang/; /x/xingshan-zeng/; /k/kam-fai-wong/", + "bibtex": "@inproceedings{wang-etal-2022-learning-quote,\n title = \"Learning When and What to Quote: A Quotation Recommender System with Mutual Promotion of Recommendation and Generation\",\n author = \"Wang, Lingzhi and\n Zeng, Xingshan and\n Wong, Kam-Fai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.225/\",\n doi = \"10.18653/v1/2022.findings-emnlp.225\",\n pages = \"3094--3105\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.225.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.225/", + "pdf_size": 765807, + "gs_citation": 3, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3085218277408176718&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China; ; The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China", + "aff_domain": "se.cuhk.edu.hk;gmail.com;se.cuhk.edu.hk", + "email": "se.cuhk.edu.hk;gmail.com;se.cuhk.edu.hk", + "github": "https://github.com/Lingzhi-WANG/GenRecMutualPromo", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "The Chinese University of Hong Kong;MoE Key Laboratory of High Confidence Software Technologies", + "aff_unique_dep": ";High Confidence Software Technologies", + "aff_unique_url": "https://www.cuhk.edu.hk;", + "aff_unique_abbr": "CUHK;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Hong Kong;", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.16", + "title": "Learning a Grammar Inducer from Massive Uncurated Instructional Videos", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Video-aided grammar induction aims to leverage video information for finding more accurate syntactic grammars for accompanying text. While previous work focuses on building systems for inducing grammars on text that are well-aligned with video content, we investigate the scenario, in which text and video are only in loose correspondence. Such data can be found in abundance online, and the weak correspondence is similar to the indeterminacy problem studied in language acquisition. Furthermore, we build a new model that can better learn video-span correlation without manually designed features adopted by previous work. 
Experiments show that our model trained only on large-scale YouTube data with no text-video alignment reports strong and robust performances across three unseen datasets, despite domain shift and noisy label issues. Furthermore our model yields higher F1 scores than the previous state-of-the-art systems trained on in-domain data.", + "author": "Songyang Zhang; Linfeng Song; Lifeng Jin; Haitao Mi; Kun Xu; Dong Yu; Jiebo Luo", + "authorids": "/s/songyang-zhang/; /l/linfeng-song/; /l/lifeng-jin/; /h/haitao-mi/; /k/kun-xu/; /d/dong-yu/; /j/jiebo-luo/", + "bibtex": "@inproceedings{zhang-etal-2022-learning-grammar,\n title = \"Learning a Grammar Inducer from Massive Uncurated Instructional Videos\",\n author = \"Zhang, Songyang and\n Song, Linfeng and\n Jin, Lifeng and\n Mi, Haitao and\n Xu, Kun and\n Yu, Dong and\n Luo, Jiebo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.16/\",\n doi = \"10.18653/v1/2022.emnlp-main.16\",\n pages = \"233--247\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.16.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.16/", + "pdf_size": 4162854, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6798139410979432483&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Rochester, Rochester, NY, USA+Tencent AI Lab, Bellevue, WA, USA; Tencent AI Lab, Bellevue, WA, USA; Tencent AI Lab, Bellevue, WA, USA; Tencent AI Lab, Bellevue, WA, USA; Tencent AI Lab, Bellevue, WA, USA; Tencent AI Lab, Bellevue, WA, USA; University of Rochester, Rochester, NY, USA", + "aff_domain": 
"ur.rochester.edu;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;cs.rochester.edu", + "email": "ur.rochester.edu;tencent.com;tencent.com;tencent.com;tencent.com;tencent.com;cs.rochester.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;1;1;1;1;0", + "aff_unique_norm": "University of Rochester;Tencent", + "aff_unique_dep": ";AI Lab", + "aff_unique_url": "https://www.rochester.edu;https://ai.tencent.com", + "aff_unique_abbr": "U of R;Tencent AI Lab", + "aff_campus_unique_index": "0+1;1;1;1;1;1;0", + "aff_campus_unique": "Rochester;Bellevue", + "aff_country_unique_index": "0+0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.18", + "title": "Learning from the Dictionary: Heterogeneous Knowledge Guided Fine-tuning for Chinese Spell Checking", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Chinese Spell Checking (CSC) aims to detect and correct Chinese spelling errors. Recent researches start from the pretrained knowledge of language models and take multimodal information into CSC models to improve the performance. However, they overlook the rich knowledge in the dictionary, the reference book where one can learn how one character should be pronounced, written, and used. In this paper, we propose the LEAD framework, which renders the CSC model to learn heterogeneous knowledge from the dictionary in terms of phonetics, vision, and meaning. LEAD first constructs positive and negative samples according to the knowledge of character phonetics, glyphs, and definitions in the dictionary. Then a unified contrastive learning-based training scheme is employed to refine the representations of the CSC models. 
Extensive experiments and detailed analyses on the SIGHAN benchmark datasets demonstrate the effectiveness of our proposed methods.", + "author": "Yinghui Li; Shirong Ma; Qingyu Zhou; Zhongli Li; Li Yangning; Shulin Huang; Ruiyang Liu; Chao Li; Yunbo Cao; Haitao Zheng", + "authorids": "/y/yinghui-li/; /s/shirong-ma/; /q/qingyu-zhou/; /z/zhongli-li/; /l/li-yangning/; /s/shulin-huang/; /r/ruiyang-liu/; /c/chao-li/; /y/yunbo-cao/; /h/haitao-zheng/", + "bibtex": "@inproceedings{li-etal-2022-learning-dictionary,\n title = \"Learning from the Dictionary: Heterogeneous Knowledge Guided Fine-tuning for {C}hinese Spell Checking\",\n author = \"Li, Yinghui and\n Ma, Shirong and\n Zhou, Qingyu and\n Li, Zhongli and\n Yangning, Li and\n Huang, Shulin and\n Liu, Ruiyang and\n Li, Chao and\n Cao, Yunbo and\n Zheng, Haitao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.18/\",\n doi = \"10.18653/v1/2022.findings-emnlp.18\",\n pages = \"238--249\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.18.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.18/", + "pdf_size": 1585719, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8637640525808407897&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Xiaomi Group; 
Tencent Cloud Xiaowei; Tsinghua Shenzhen International Graduate School, Tsinghua University+Peng Cheng Laboratory", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tencent.com; ; ; ; ; ; ;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tencent.com; ; ; ; ; ; ;sz.tsinghua.edu.cn", + "github": "https://github.com/geekjuruo/LEAD", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;1;1;0;0;0;2;1;0+3", + "aff_unique_norm": "Tsinghua University;Tencent;Xiaomi Corporation;Peng Cheng Laboratory", + "aff_unique_dep": "International Graduate School;Tencent Cloud Xiaowei;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://cloud.tencent.com;https://www.xiaomi.com;http://www.pcl.ac.cn", + "aff_unique_abbr": "THU;Tencent;Xiaomi;PCL", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.66", + "title": "Learning to Adapt to Low-Resource Paraphrase Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Paraphrase generation is a longstanding NLP task and achieves great success with the aid of large corpora. However, transferring a paraphrasing model to another domain encounters the problem of domain shifting especially when the data is sparse. At the same time, widely using large pre-trained language models (PLMs) faces the overfitting problem when training on scarce labeled data. To mitigate these two issues, we propose, LAPA, an effective adapter for PLMs optimized by meta-learning. LAPA has three-stage training on three types of related resources to solve this problem: 1. pre-training PLMs on unsupervised corpora, 2. inserting an adapter layer and meta-training on source domain labeled data, and 3. fine-tuning adapters on a small amount of target domain labeled data. 
This method enables paraphrase generation models to learn basic language knowledge first, then learn the paraphrasing task itself later, and finally adapt to the target task. Our experimental results demonstrate that LAPA achieves state-of-the-art in supervised, unsupervised, and low-resource settings on three benchmark datasets. With only 2% of trainable parameters and 1% labeled data of the target task, our approach can achieve a competitive performance with previous work.", + "author": "Zhigen Li; Yanmeng Wang; Rizhao Fan; Ye Wang; Jianfeng Li; Shaojun Wang", + "authorids": "/z/zhigen-li/; /y/yanmeng-wang/; /r/rizhao-fan/; /y/ye-wang/; /j/jianfeng-li/; /s/shaojun-wang/", + "bibtex": "@inproceedings{li-etal-2022-learning-adapt,\n title = \"Learning to Adapt to Low-Resource Paraphrase Generation\",\n author = \"Li, Zhigen and\n Wang, Yanmeng and\n Fan, Rizhao and\n Wang, Ye and\n Li, Jianfeng and\n Wang, Shaojun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.66/\",\n doi = \"10.18653/v1/2022.emnlp-main.66\",\n pages = \"1014--1022\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.66.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.66/", + "pdf_size": 747719, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=652658832282230023&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Ping An Technology; Ping An Technology; University of Bologna; Ping An Technology; Ping An Technology; Ping An Technology", + "aff_domain": "pingan.com.cn;pingan.com.cn;unibo.it;pingan.com.cn;pingan.com.cn;pingan.com.cn", + "email": 
"pingan.com.cn;pingan.com.cn;unibo.it;pingan.com.cn;pingan.com.cn;pingan.com.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0;0", + "aff_unique_norm": "Ping An Technology;University of Bologna", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.pingan.com;https://www.unibo.it", + "aff_unique_abbr": "Ping An;Unibo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;Italy" + }, + { + "id": "2022.emnlp-main.142", + "title": "Learning to Decompose: Hypothetical Question Decomposition Based on Comparable Texts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Explicit decomposition modeling, which involves breaking down complex tasks into more straightforward and often more interpretable sub-tasks, has long been a central theme in developing robust and interpretable NLU systems. However, despite the many datasets and resources built as part of this effort, the majority have small-scale annotations and limited scope, which is insufficient to solve general decomposition tasks. In this paper, we look at large-scale intermediate pre-training of decomposition-based transformers using distant supervision from comparable texts, particularly large-scale parallel news. We show that with such intermediate pre-training, developing robust decomposition-based models for a diverse range of tasks becomes more feasible. For example, on semantic parsing, our model, DecompT5, improves 20% to 30% on two datasets, Overnight and TORQUE, over the baseline language model. 
We further use DecompT5 to build a novel decomposition-based QA system named DecompEntail, improving over state-of-the-art models, including GPT-3, on both HotpotQA and StrategyQA by 8% and 4%, respectively.", + "author": "Ben Zhou; Kyle Richardson; Xiaodong Yu; Dan Roth", + "authorids": "/b/ben-zhou/; /k/kyle-richardson/; /x/xiaodong-yu/; /d/dan-roth/", + "bibtex": "@inproceedings{zhou-etal-2022-learning-decompose,\n title = \"Learning to Decompose: Hypothetical Question Decomposition Based on Comparable Texts\",\n author = \"Zhou, Ben and\n Richardson, Kyle and\n Yu, Xiaodong and\n Roth, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.142/\",\n doi = \"10.18653/v1/2022.emnlp-main.142\",\n pages = \"2223--2235\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.142.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.142/", + "pdf_size": 358747, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18062992435023770334&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of Pennsylvania; Allen Institute for AI; University of Pennsylvania; University of Pennsylvania", + "aff_domain": "seas.upenn.edu;allenai.org;seas.upenn.edu;seas.upenn.edu", + "email": "seas.upenn.edu;allenai.org;seas.upenn.edu;seas.upenn.edu", + "github": "", + "project": "http://cogcomp.org/page/publication_view/9922223", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Pennsylvania;Allen Institute for AI", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.upenn.edu;https://allenai.org", + "aff_unique_abbr": "UPenn;AI2", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.426", + "title": "Learning to Detect Noisy Labels Using Model-Based Features", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Label noise is ubiquitous in various machine learning scenarios such as self-labeling with model predictions and erroneous data annotation. Many existing approaches are based on heuristics such as sample losses, which might not be flexible enough to achieve optimal solutions. Meta learning based methods address this issue by learning a data selection function, but can be hard to optimize. In light of these pros and cons, we propose SENT (Selection-Enhanced Noisy label Training) that does not rely on meta learning while having the flexibility of being data-driven. SENT transfers the noise distribution to a clean set and trains a model to distinguish noisy labels from clean ones using model-based features. 
Empirically, on a wide range of tasks including text classification and speech recognition, SENT improves performance over strong baselines under the settings of self-training and label corruption.", + "author": "Zhihao Wang; Zongyu Lin; Junjie Wen; Xianxin Chen; Peiqi Liu; Guidong Zheng; Yujun Chen; Zhilin Yang", + "authorids": "/z/zhihao-wang/; /z/zongyu-lin/; /j/junjie-wen/; /x/xianxin-chen/; /p/peiqi-liu/; /g/guidong-zheng/; /y/yujun-chen/; /z/zhilin-yang/", + "bibtex": "@inproceedings{wang-etal-2022-learning-detect,\n title = \"Learning to Detect Noisy Labels Using Model-Based Features\",\n author = \"Wang, Zhihao and\n Lin, Zongyu and\n Wen, Junjie and\n Chen, Xianxin and\n Liu, Peiqi and\n Zheng, Guidong and\n Chen, Yujun and\n Yang, Zhilin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.426/\",\n doi = \"10.18653/v1/2022.findings-emnlp.426\",\n pages = \"5796--5808\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.426.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.426/", + "pdf_size": 991011, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17514472190866202890&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Recurrent AI+Meta; Tsinghua University; China Merchants Bank; China Merchants Bank; China Merchants Bank; Recurrent AI; Recurrent AI; Tsinghua University+Meta", + "aff_domain": "meta.com;tsinghua.edu.cn;cmbchina.com;cmbchina.com;cmbchina.com;rcrai.com;rcrai.com;tsinghua.edu.cn", + "email": "meta.com;tsinghua.edu.cn;cmbchina.com;cmbchina.com;cmbchina.com;rcrai.com;rcrai.com;tsinghua.edu.cn", + "github": "https://github.com/Rafa-zy/SENT", + "project": "", + 
"author_num": 8, + "aff_unique_index": "0+1;2;3;3;3;0;0;2+1", + "aff_unique_norm": "Recurrent AI;Meta Platforms, Inc.;Tsinghua University;China Merchants Bank", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.recurrent.ai;https://meta.com;https://www.tsinghua.edu.cn;https://www.cmbchina.com.cn", + "aff_unique_abbr": "Recurrent AI;Meta;THU;CMB", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;1;1;1;1;0;0;1+0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.573", + "title": "Learning to Explain Selectively: A Case Study on Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Explanations promise to bridge the gap between humans and AI, yet it remains difficult to achieve consistent improvement in AI-augmented human decision making. The usefulness of AI explanations depends on many factors, and always showing the same type of explanation in all cases is suboptimal\u2014so is relying on heuristics to adapt explanations for each scenario. We propose learning to explain\u201dselectively\u201d: for each decision that the user makes, we use a model to choose the best explanation from a set of candidates and update this model with feedback to optimize human performance. 
We experiment on a question answering task, Quizbowl, and show that selective explanations improve human performance for both experts and crowdworkers.", + "author": "Shi Feng; Jordan Boyd-Graber", + "authorids": "/s/shi-feng/; /j/jordan-boyd-graber/", + "bibtex": "@inproceedings{feng-boyd-graber-2022-learning,\n title = \"Learning to Explain Selectively: A Case Study on Question Answering\",\n author = \"Feng, Shi and\n Boyd-Graber, Jordan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.573/\",\n doi = \"10.18653/v1/2022.emnlp-main.573\",\n pages = \"8372--8382\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.573.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.573/", + "pdf_size": 658132, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12809591386639016270&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "University of Chicago; University of Maryland", + "aff_domain": "uchicago.edu;umiacs.umd.edu", + "email": "uchicago.edu;umiacs.umd.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Chicago;University of Maryland", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uchicago.edu;https://www/umd.edu", + "aff_unique_abbr": "UChicago;UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.807", + "title": "Learning to Generate Overlap Summaries through Noisy Synthetic Data", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Semantic Overlap Summarization 
(SOS) is a novel and relatively under-explored seq-to-seq task which entails summarizing common information from multiple alternate narratives. One of the major challenges for solving this task is the lack of existing datasets for supervised training. To address this challenge, we propose a novel data augmentation technique, which allows us to create large amount of synthetic data for training a seq-to-seq model that can perform the SOS task. Through extensive experiments using narratives from the news domain, we show that the models fine-tuned using the synthetic dataset provide significant performance improvements over the pre-trained vanilla summarization techniques and are close to the models fine-tuned on the golden training data; which essentially demonstrates the effectiveness of out proposed data augmentation technique for training seq-to-seq models on the SOS task.", + "author": "Naman Bansal; Mousumi Akter; Shubhra Kanti Karmaker Santu", + "authorids": "/n/naman-bansal/; /m/mousumi-akter/; /s/shubhra-kanti-karmaker-santu/", + "bibtex": "@inproceedings{bansal-etal-2022-learning,\n title = \"Learning to Generate Overlap Summaries through Noisy Synthetic Data\",\n author = \"Bansal, Naman and\n Akter, Mousumi and\n Karmaker Santu, Shubhra Kanti\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.807/\",\n doi = \"10.18653/v1/2022.emnlp-main.807\",\n pages = \"11765--11777\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.807.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.807/", + "pdf_size": 668064, + "gs_citation": 4, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=6282797776369483972&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University; Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University; Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University", + "aff_domain": "auburn.edu;auburn.edu;auburn.edu", + "email": "auburn.edu;auburn.edu;auburn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Auburn University", + "aff_unique_dep": "Department of Computer Science and Software Engineering", + "aff_unique_url": "https://www.auburn.edu", + "aff_unique_abbr": "Auburn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.4", + "title": "Learning to Generate Question by Asking Question: A Primal-Dual Approach with Uncommon Word Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Automatic question generation (AQG) is the task of generating a question from a given passage and an answer. Most existing AQG methods aim at encoding the passage and the answer to generate the question. However, limited work has focused on modeling the correlation between the target answer and the generated question. Moreover, unseen or rare word generation has not been studied in previous works. In this paper, we propose a novel approach which incorporates question generation with its dual problem, question answering, into a unified primal-dual framework. 
Specifically, the question generation component consists of an encoder that jointly encodes the answer with the passage, and a decoder that produces the question. The question answering component then re-asks the generated question on the passage to ensure that the target answer is obtained. We further introduce a knowledge distillation module to improve the model generalization ability. We conduct an extensive set of experiments on SQuAD and HotpotQA benchmarks. Experimental results demonstrate the superior performance of the proposed approach over several state-of-the-art methods.", + "author": "Qifan Wang; Li Yang; Xiaojun Quan; Fuli Feng; Dongfang Liu; Zenglin Xu; Sinong Wang; Hao Ma", + "authorids": "/q/qifan-wang/; /l/li-yang/; /x/xiaojun-quan/; /f/fuli-feng/; /d/dongfang-liu/; /z/zenglin-xu/; /s/sinong-wang/; /h/hao-ma/", + "bibtex": "@inproceedings{wang-etal-2022-learning-generate,\n title = \"Learning to Generate Question by Asking Question: A Primal-Dual Approach with Uncommon Word Generation\",\n author = \"Wang, Qifan and\n Yang, Li and\n Quan, Xiaojun and\n Feng, Fuli and\n Liu, Dongfang and\n Xu, Zenglin and\n Wang, Sinong and\n Ma, Hao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.4/\",\n doi = \"10.18653/v1/2022.emnlp-main.4\",\n pages = \"46--61\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.4.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.4/", + "pdf_size": 953542, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8151323095118432625&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "Meta AI; Google Research; Sun Yat-sen University; University of 
Science and Technology of China; Rochester Institute of Technology; Harbin Institute of Technology; Meta AI; Meta AI", + "aff_domain": "fb.com;google.com; ; ; ; ; ; ", + "email": "fb.com;google.com; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;4;5;0;0", + "aff_unique_norm": "Meta Platforms, Inc.;Google;Sun Yat-sen University;University of Science and Technology of China;Rochester Institute of Technology;Harbin Institute of Technology", + "aff_unique_dep": "Meta AI;Google Research;;;;", + "aff_unique_url": "https://meta.com;https://research.google;http://www.sysu.edu.cn/;http://www.ustc.edu.cn;https://www.rit.edu;http://www.hit.edu.cn/", + "aff_unique_abbr": "Meta;Google Research;SYSU;USTC;RIT;HIT", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Mountain View;Harbin", + "aff_country_unique_index": "0;0;1;1;0;1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.351", + "title": "Learning to Infer from Unlabeled Data: A Semi-supervised Learning Approach for Robust Natural Language Inference", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural Language Inference (NLI) or Recognizing Textual Entailment (RTE) aims at predicting the relation between a pair of sentences (premise and hypothesis) as entailment, contradiction or semantic independence. Although deep learning models have shown promising performance for NLI in recent years, they rely on large scale expensive human-annotated datasets. Semi-supervised learning (SSL) is a popular technique for reducing the reliance on human annotation by leveraging unlabeled data for training. 
However, despite its substantial success on single sentence classification tasks where the challenge in making use of unlabeled data is to assign \u201cgood enough\u201d pseudo-labels, for NLI tasks, the nature of unlabeled data is more complex: one of the sentences in the pair (usually the hypothesis) along with the class label are missing from the data and require human annotations, which makes SSL for NLI more challenging. In this paper, we propose a novel way to incorporate unlabeled data in SSL for NLI where we use a conditional language model, BART to generate the hypotheses for the unlabeled sentences (used as premises). Our experiments show that our SSL framework successfully exploits unlabeled data and substantially improves the performance of four NLI datasets in low-resource settings. We release our code here: https://github.com/msadat3/SSL_for_NLI", + "author": "Mobashir Sadat; Cornelia Caragea", + "authorids": "/m/mobashir-sadat/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{sadat-caragea-2022-learning,\n title = \"Learning to Infer from Unlabeled Data: A Semi-supervised Learning Approach for Robust Natural Language Inference\",\n author = \"Sadat, Mobashir and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.351/\",\n doi = \"10.18653/v1/2022.findings-emnlp.351\",\n pages = \"4763--4776\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.351.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.351/", + "pdf_size": 322876, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=444868143824360841&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Computer 
Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago", + "aff_domain": "uic.edu;uic.edu", + "email": "uic.edu;uic.edu", + "github": "https://github.com/msadat3/SSL_for_NLI", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.280", + "title": "Learning to Model Editing Processes", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Most existing sequence generation models produce outputs in one pass, usually left-to-right. However, this is in contrast with a more natural approach that humans use in generating content; iterative refinement and editing. Recent work has introduced edit-based models for various tasks (such as neural machine translation and text style transfer), but these generally model a single edit step. In this work, we propose modeling editing processes, modeling the whole process of iteratively generating sequences. We form a conceptual framework to describe the likelihood of multi-step edits, and describe neural models that can learn a generative model of sequences based on these multistep edits. 
We introduce baseline results and metrics on this task, finding that modeling editing processes improves performance on a variety of axes on both our proposed task and related downstream tasks compared to previous single-step models of edits.", + "author": "Machel Reid; Graham Neubig", + "authorids": "/m/machel-reid/; /g/graham-neubig/", + "bibtex": "@inproceedings{reid-neubig-2022-learning,\n title = \"Learning to Model Editing Processes\",\n author = \"Reid, Machel and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.280/\",\n doi = \"10.18653/v1/2022.findings-emnlp.280\",\n pages = \"3822--3832\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.280.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.280/", + "pdf_size": 1025080, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15615331016221008896&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Tokyo\u2217; Carnegie Mellon University + Inspired Cognition", + "aff_domain": "google.com;cs.cmu.edu", + "email": "google.com;cs.cmu.edu", + "github": "https://github.com/machelreid/editpro", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+2", + "aff_unique_norm": "University of Tokyo;Carnegie Mellon University;Inspired Cognition", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.cmu.edu;", + "aff_unique_abbr": "UTokyo;CMU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Japan;United States;" + }, + { + "id": "2022.findings-emnlp.346", + "title": "Learning to Model Multimodal 
Semantic Alignment for Story Visualization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Story visualization aims to generate a sequence of images to narrate each sentence in a multi-sentence story, where the images should be realistic and keep global consistency across dynamic scenes and characters. Current works face the problem of semantic misalignment because of their fixed architecture and diversity of input modalities. To address this problem, we explore the semantic alignment between text and image representations by learning to match their semantic levels in the GAN-based generative model. More specifically, we introduce dynamic interactions according to learning to dynamically explore various semantic depths and fuse the different-modal information at a matched semantic level, which thus relieves the text-image semantic misalignment problem. Extensive experiments on different datasets demonstrate the improvements of our approach, neither using segmentation masks nor auxiliary captioning networks, on image quality and story consistency, compared with state-of-the-art methods.", + "author": "Bowen Li; Thomas Lukasiewicz", + "authorids": "/b/bowen-li/; /t/thomas-lukasiewicz/", + "bibtex": "@inproceedings{li-lukasiewicz-2022-learning,\n title = \"Learning to Model Multimodal Semantic Alignment for Story Visualization\",\n author = \"Li, Bowen and\n Lukasiewicz, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.346/\",\n doi = \"10.18653/v1/2022.findings-emnlp.346\",\n pages = \"4712--4718\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.346.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.346/", + "pdf_size": 2805683, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12416513432555629335&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of Oxford; TU Wien + University of Oxford", + "aff_domain": "cs.ox.ac.uk;cs.ox.ac.uk", + "email": "cs.ox.ac.uk;cs.ox.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1+0", + "aff_unique_norm": "University of Oxford;Technische Universit\u00e4t Wien", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ox.ac.uk;https://www.tuwien.ac.at", + "aff_unique_abbr": "Oxford;TU Wien", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+0", + "aff_country_unique": "United Kingdom;Austria" + }, + { + "id": "2022.findings-emnlp.121", + "title": "Learning to Perform Complex Tasks through Compositional Fine-Tuning of Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "How to usefully encode compositional task structure has long been a core challenge in AI. Recent work in chain of thought prompting has shown that for very large neural language models (LMs), explicitly demonstrating the inferential steps involved in a target task may improve performance over end-to-end learning that focuses on the target task alone. However, chain of thought prompting has significant limitations due to its dependency on huge pretrained LMs. In this work, we present compositional fine-tuning (CFT): an approach based on explicitly decomposing a target task into component tasks, and then fine-tuning smaller LMs on a curriculum of such component tasks. We apply CFT to recommendation tasks in two domains, world travel and local dining, as well as a previously studied inferential task (sports understanding). 
We show that CFT outperforms end-to-end learning even with equal amounts of data, and gets consistently better as more component tasks are modeled via fine-tuning. Compared with chain of thought prompting, CFT performs at least as well using LMs only 7.4% of the size, and is moreover applicable to task domains for which data are not available during pretraining.", + "author": "Victor Bursztyn; David Demeter; Doug Downey; Larry Birnbaum", + "authorids": "/v/victor-bursztyn/; /d/david-demeter/; /d/doug-downey/; /l/larry-birnbaum/", + "bibtex": "@inproceedings{bursztyn-etal-2022-learning,\n title = \"Learning to Perform Complex Tasks through Compositional Fine-Tuning of Language Models\",\n author = \"Bursztyn, Victor and\n Demeter, David and\n Downey, Doug and\n Birnbaum, Larry\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.121/\",\n doi = \"10.18653/v1/2022.findings-emnlp.121\",\n pages = \"1676--1686\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.121.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.121/", + "pdf_size": 1073253, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1946470150205768888&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, Northwestern University, Evanston, IL, USA+Allen Institute for Artificial Intelligence, Seattle, WA, USA; Department of Computer Science, Northwestern University, Evanston, IL, USA; Department of Computer Science, Northwestern University, Evanston, IL, USA; Department of Computer Science, Northwestern University, Evanston, IL, USA", + "aff_domain": 
"u.northwestern.edu;u.northwestern.edu;northwestern.edu;northwestern.edu", + "email": "u.northwestern.edu;u.northwestern.edu;northwestern.edu;northwestern.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;0;0", + "aff_unique_norm": "Northwestern University;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.northwestern.edu;https://allenai.org", + "aff_unique_abbr": "NU;AI2", + "aff_campus_unique_index": "0+1;0;0;0", + "aff_campus_unique": "Evanston;Seattle", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.296", + "title": "Learning to Revise References for Faithful Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In real-world scenarios with naturally occurring datasets, reference summaries are noisy and may contain information that cannot be inferred from the source text. On large news corpora, removing low quality samples has been shown to reduce model hallucinations. Yet, for smaller, and/or noisier corpora, filtering is detrimental to performance. To improve reference quality while retaining all data, we propose a new approach: to selectively re-write unsupported reference sentences to better reflect source data. We automatically generate a synthetic dataset of positive and negative revisions by corrupting supported sentences and learn to revise reference sentences with contrastive learning. The intensity of revisions is treated as a controllable attribute so that, at inference, diverse candidates can be over-generated-then-rescored to balance faithfulness and abstraction. To test our methods, we extract noisy references from publicly available MIMIC-III discharge summaries for the task of hospital-course summarization, and vary the data on which models are trained. 
According to metrics and human evaluation, models trained on revised clinical references are much more faithful, informative, and fluent than models trained on original or filtered data.", + "author": "Griffin Adams; Han-Chin Shing; Qing Sun; Christopher Winestock; Kathleen McKeown; No\u00e9mie Elhadad", + "authorids": "/g/griffin-adams/; /h/han-chin-shing/; /q/qing-sun/; /c/christopher-winestock/; /k/kathleen-mckeown/; /n/noemie-elhadad/", + "bibtex": "@inproceedings{adams-etal-2022-learning,\n title = \"Learning to Revise References for Faithful Summarization\",\n author = \"Adams, Griffin and\n Shing, Han-Chin and\n Sun, Qing and\n Winestock, Christopher and\n McKeown, Kathleen and\n Elhadad, No{\\'e}mie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.296/\",\n doi = \"10.18653/v1/2022.findings-emnlp.296\",\n pages = \"4009--4027\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.296.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.296/", + "pdf_size": 711124, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17769762743191730438&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Columbia University, New York, NY + Amazon AWS AI, Seattle, WA; Amazon AWS AI, Seattle, WA; Amazon AWS AI, Seattle, WA; Amazon AWS AI, Seattle, WA; Columbia University, New York, NY + Amazon AWS AI, Seattle, WA; Columbia University, New York, NY", + "aff_domain": "columbia.edu;amazon.com;amazon.com;amazon.com;amazon.com;columbia.edu", + "email": "columbia.edu;amazon.com;amazon.com;amazon.com;amazon.com;columbia.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": 
"0+1;1;1;1;0+1;0", + "aff_unique_norm": "Columbia University;Amazon", + "aff_unique_dep": ";Amazon AWS AI", + "aff_unique_url": "https://www.columbia.edu;https://aws.amazon.com", + "aff_unique_abbr": "Columbia;Amazon AWS", + "aff_campus_unique_index": "0+1;1;1;1;0+1;0", + "aff_campus_unique": "New York;Seattle", + "aff_country_unique_index": "0+0;0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.663", + "title": "Learning with Rejection for Abstractive Text Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "State-of-the-art abstractive summarization systems frequently hallucinate content that is not supported by the source document, mainly due to noise in the training dataset.Existing methods opt to drop the noisy samples or tokens from the training set entirely, reducing the effective training set size and creating an artificial propensity to copy words from the source. In this work, we propose a training objective for abstractive summarization based on rejection learning, in which the model learns whether or not to reject potentially noisy tokens. 
We further propose a regularized decoding objective that penalizes non-factual candidate summaries during inference by using the rejection probability learned during training.We show that our method considerably improves the factuality of generated summaries in automatic and human evaluations when compared to five baseline models, and that it does so while increasing the abstractiveness of the generated summaries.", + "author": "Meng Cao; Yue Dong; Jingyi He; Jackie Chi Kit Cheung", + "authorids": "/m/meng-cao/; /y/yue-dong/; /j/jingyi-he/; /j/jackie-chi-kit-cheung/", + "bibtex": "@inproceedings{cao-etal-2022-learning,\n title = \"Learning with Rejection for Abstractive Text Summarization\",\n author = \"Cao, Meng and\n Dong, Yue and\n He, Jingyi and\n Cheung, Jackie Chi Kit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.663/\",\n doi = \"10.18653/v1/2022.emnlp-main.663\",\n pages = \"9768--9780\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.663.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.663/", + "pdf_size": 341725, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=269166463517427994&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "Mila / McGill University, Montr\u00e9al, QC, Canada; Mila / McGill University, Montr\u00e9al, QC, Canada; Mila / McGill University, Montr\u00e9al, QC, Canada; Mila / McGill University, Montr\u00e9al, QC, Canada", + "aff_domain": "mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;cs.mcgill.ca", + "email": "mail.mcgill.ca;mail.mcgill.ca;mail.mcgill.ca;cs.mcgill.ca", + "github": "https://github.com/mcao516/rej-summ", + "project": "", + 
"author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "McGill University", + "aff_unique_dep": "Mila", + "aff_unique_url": "https://www.mcgill.ca", + "aff_unique_abbr": "McGill", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Montr\u00e9al", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.emnlp-main.301", + "title": "Less is More: Summary of Long Instructions is Better for Program Synthesis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite the success of large pre-trained language models (LMs) such as Codex, they show below-par performance on the larger and more complicated programming related questions. We show that LMs benefit from the summarized version of complicated questions. Our findings show that superfluous information often present in problem description such as human characters, background stories, and names (which are included to help humans in understanding a task) does not help models in understanding a task. To this extent, we create a meta-dataset from the frequently used APPS dataset and the newly created CodeContests dataset for the program synthesis task. Our meta-dataset consists of human and synthesized summaries of the long and complicated programming questions. Experimental results on Codex show that our proposed approach outperforms baseline by 8.13% on the APPS dataset and 11.88% on the CodeContests dataset on an average in terms of strict accuracy. Our analysis shows that summaries significantly improve performance for introductory (9.86%) and interview (11.48%) related programming questions. 
However, it shows improvement by a small margin ( 2%) for competitive programming questions, implying the scope for future research direction.", + "author": "Kirby Kuznia; Swaroop Mishra; Mihir Parmar; Chitta Baral", + "authorids": "/k/kirby-kuznia/; /s/swaroop-mishra/; /m/mihir-parmar/; /c/chitta-baral/", + "bibtex": "@inproceedings{kuznia-etal-2022-less,\n title = \"Less is More: Summary of Long Instructions is Better for Program Synthesis\",\n author = \"Kuznia, Kirby and\n Mishra, Swaroop and\n Parmar, Mihir and\n Baral, Chitta\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.301/\",\n doi = \"10.18653/v1/2022.emnlp-main.301\",\n pages = \"4532--4552\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.301.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.301/", + "pdf_size": 1007186, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16456614099330753242&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Arizona State University; Arizona State University; Arizona State University; Arizona State University", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/kurbster/Prompt-Summarization", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.484", + "title": "Let the CAT out of the bag: Contrastive Attributed explanations for 
Text", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Contrastive explanations for understanding the behavior of black box models has gained a lot of attention recently as they provide potential for recourse. In this paper, we propose a method Contrastive Attributed explanations for Text (CAT) which provides contrastive explanations for natural language text data with a novel twist as we build and exploit attribute classifiers leading to more semantically meaningful explanations. To ensure that our contrastive generated text has the fewest possible edits with respect to the original text, while also being fluent and close to a human generated contrastive, we resort to a minimal perturbation approach regularized using a BERT language model and attribute classifiers trained on available attributes. We show through qualitative examples and a user study that our method not only conveys more insight because of these attributes, but also leads to better quality (contrastive) text. 
Quantitatively, we show that our method outperforms other state-of-the-art methods across four data sets on four benchmark metrics.", + "author": "Saneem Chemmengath; Amar Prakash Azad; Ronny Luss; Amit Dhurandhar", + "authorids": "/s/saneem-chemmengath/; /a/amar-prakash-azad/; /r/ronny-luss/; /a/amit-dhurandhar/", + "bibtex": "@inproceedings{chemmengath-etal-2022-cat,\n title = \"Let the {CAT} out of the bag: Contrastive Attributed explanations for Text\",\n author = \"Chemmengath, Saneem and\n Azad, Amar Prakash and\n Luss, Ronny and\n Dhurandhar, Amit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.484/\",\n doi = \"10.18653/v1/2022.emnlp-main.484\",\n pages = \"7190--7206\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.484.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.484/", + "pdf_size": 609523, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8602619737137699065&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 4, + "aff": "Microsoft; IBM Research; IBM Research; IBM Research", + "aff_domain": "microsoft.com;in.ibm.com;us.ibm.com;us.ibm.com", + "email": "microsoft.com;in.ibm.com;us.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Microsoft Corporation;IBM", + "aff_unique_dep": ";IBM Research", + "aff_unique_url": "https://www.microsoft.com;https://www.ibm.com/research", + "aff_unique_abbr": "Microsoft;IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.393", + "title": 
"Leveraging Affirmative Interpretations from Negation Improves Natural Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Negation poses a challenge in many natural language understanding tasks. Inspired by the fact that understanding a negated statement often requires humans to infer affirmative interpretations, in this paper we show that doing so benefits models for three natural language understanding tasks. We present an automated procedure to collect pairs of sentences with negation and their affirmative interpretations, resulting in over 150,000 pairs. Experimental results show that leveraging these pairs helps (a) T5 generate affirmative interpretations from negations in a previous benchmark, and (b) a RoBERTa-based classifier solve the task of natural language inference. We also leverage our pairs to build a plug-and-play neural generator that given a negated statement generates an affirmative interpretation. Then, we incorporate the pretrained generator into a RoBERTa-based classifier for sentiment analysis and show that doing so improves the results. 
Crucially, our proposal does not require any manual effort.", + "author": "Md Mosharaf Hossain; Eduardo Blanco", + "authorids": "/m/md-mosharaf-hossain/; /e/eduardo-blanco/", + "bibtex": "@inproceedings{hossain-blanco-2022-leveraging,\n title = \"Leveraging Affirmative Interpretations from Negation Improves Natural Language Understanding\",\n author = \"Hossain, Md Mosharaf and\n Blanco, Eduardo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.393/\",\n doi = \"10.18653/v1/2022.emnlp-main.393\",\n pages = \"5833--5847\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.393.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.393/", + "pdf_size": 249685, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10147323401974306244&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.findings-emnlp.328", + "title": "Leveraging Data Recasting to Enhance Tabular Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Creating challenging tabular inference data is essential for learning complex reasoning. Prior work has mostly relied on two data generation strategies. The first is human annotation, which yields linguistically diverse data but is difficult to scale. The second category for creation is synthetic generation, which is scalable and cost effective but lacks inventiveness. In this research, we present a framework for semi-automatically recasting existing tabular data to make use of the benefits of both approaches. 
We utilize our framework to build tabular NLI instances from five datasets that were initially intended for tasks like table2text creation, tabular Q/A, and semantic parsing. We demonstrate that recasted data could be used as evaluation benchmarks as well as augmentation data to enhance performance on tabular NLI tasks. Furthermore, we investigate the effectiveness of models trained on recasted data in the zero-shot scenario, and analyse trends in performance across different recasted datasets types.", + "author": "Aashna Jena; Vivek Gupta; Manish Shrivastava; Julian Eisenschlos", + "authorids": "/a/aashna-jena/; /v/vivek-gupta/; /m/manish-shrivastava/; /j/julian-eisenschlos/", + "bibtex": "@inproceedings{jena-etal-2022-leveraging,\n title = \"Leveraging Data Recasting to Enhance Tabular Reasoning\",\n author = \"Jena, Aashna and\n Gupta, Vivek and\n Shrivastava, Manish and\n Eisenschlos, Julian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.328/\",\n doi = \"10.18653/v1/2022.findings-emnlp.328\",\n pages = \"4483--4496\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.328.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.328/", + "pdf_size": 227654, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3977739625907874670&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.408", + "title": "Leveraging Locality in Abstractive Text Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Neural attention models have achieved 
significant improvements on many natural language processing tasks. However, the quadratic memory complexity of the self-attention module with respect to the input length hinders their applications in long text summarization. Instead of designing more efficient attention modules, we approach this problem by investigating if models with a restricted context can have competitive performance compared with the memory-efficient attention models that maintain a global context by treating the input as a single sequence. Our model is applied to individual pages, which contain parts of inputs grouped by the principle of locality, during both the encoding and decoding stages. We empirically investigated three kinds of locality in text summarization at different levels of granularity, ranging from sentences to documents. Our experimental results show that our model has a better performance compared with strong baseline models with efficient attention modules, and our analysis provides further insights into our locality-aware modeling strategy.", + "author": "Yixin Liu; Ansong Ni; Linyong Nan; Budhaditya Deb; Chenguang Zhu; Ahmed Hassan Awadallah; Dragomir Radev", + "authorids": "/y/yixin-liu/; /a/ansong-ni/; /l/linyong-nan/; /b/budhaditya-deb/; /c/chenguang-zhu/; /a/ahmed-hassan/; /d/dragomir-radev/", + "bibtex": "@inproceedings{liu-etal-2022-leveraging-locality,\n title = \"Leveraging Locality in Abstractive Text Summarization\",\n author = \"Liu, Yixin and\n Ni, Ansong and\n Nan, Linyong and\n Deb, Budhaditya and\n Zhu, Chenguang and\n Awadallah, Ahmed Hassan and\n Radev, Dragomir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.408/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.408\",\n pages = \"6081--6093\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.408.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.408/", + "pdf_size": 753314, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17858393528070043702&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Yale University; Yale University; Yale University; Microsoft Research; Microsoft Research; Microsoft Research; Yale University", + "aff_domain": "yale.edu;yale.edu;yale.edu;microsoft.com;microsoft.com;microsoft.com;yale.edu", + "email": "yale.edu;yale.edu;yale.edu;microsoft.com;microsoft.com;microsoft.com;yale.edu", + "github": "https://github.com/yixinL7/PageSum", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;1;1;0", + "aff_unique_norm": "Yale University;Microsoft Corporation", + "aff_unique_dep": ";Microsoft Research", + "aff_unique_url": "https://www.yale.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "Yale;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.97", + "title": "Leveraging Only the Category Name for Aspect Detection through Prompt-based Constrained Clustering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Aspect category detection (ACD) aims to automatically identify user-concerned aspects from online reviews, which is of great value for evaluating the fine-grained performance of a product. The most recent solutions tackle this problem via weakly supervised methods, achieving remarkable improvement over unsupervised methods. However, a closer look at these methods reveals that the required human efforts are nontrivial and can sometimes be hard to obtain. 
In this study, we explore the possibility of minimizing human guidance while improving detection performance, with a deep clustering method that relies merely on the category name of each aspect and a pretrained language model (LM). The LM, combined with prompt techniques, is employed as a knowledge base to automatically generate constraints for clustering, as well as to provide a representation space to perform the clustering. Our method (1) extracts extensive keywords to expand our understanding of each aspect, (2) automatically generates instance-level and concept-level constraints for clustering, and (3) trains the clustering model with the above constraints. We demonstrate the capability of the proposed framework through extensive experiments on nine benchmark datasets. Our model not only performs noticeably better than existing unsupervised approaches but also considerably surpasses weakly supervised methods that require more human efforts.", + "author": "Yazheng Li; Pengyun Wang; Yasheng Wang; Yong Dai; Yadao Wang; Lujia Pan; Zenglin Xu", + "authorids": "/y/yazheng-li/; /p/pengyun-wang/; /y/yasheng-wang/; /y/yong-dai/; /y/yadao-wang/; /l/lujia-pan/; /z/zenglin-xu/", + "bibtex": "@inproceedings{li-etal-2022-leveraging,\n title = \"Leveraging Only the Category Name for Aspect Detection through Prompt-based Constrained Clustering\",\n author = \"Li, Yazheng and\n Wang, Pengyun and\n Wang, Yasheng and\n Dai, Yong and\n Wang, Yadao and\n Pan, Lujia and\n Xu, Zenglin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.97/\",\n doi = \"10.18653/v1/2022.findings-emnlp.97\",\n pages = \"1352--1364\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.97.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.97/", + "pdf_size": 1209128, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13788456649588285942&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 0, + "aff": "University of Electronic Science and Technology of China, Chengdu, China; Huawei Noah\u2019s Ark Lab + Peng Cheng Lab, Shenzhen, China; Huawei Noah\u2019s Ark Lab, Shenzhen, China; University of Electronic Science and Technology of China, Chengdu, China; Huawei Noah\u2019s Ark Lab, Shenzhen, China; Huawei Noah\u2019s Ark Lab, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China + Peng Cheng Lab, Shenzhen, China", + "aff_domain": "163.com;huawei.com;huawei.com;gmail.com;huawei.com;huawei.com;gmail.com", + "email": "163.com;huawei.com;huawei.com;gmail.com;huawei.com;huawei.com;gmail.com", + "github": "https://github.com/liyazheng/PCCT", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1+2;3;0;3;3;4+2", + "aff_unique_norm": "University of Electronic Science and Technology of China;Huawei;Peng Cheng Lab;Huawei Noah\u2019s Ark Lab;Harbin Institute of Technology", + "aff_unique_dep": ";Noah\u2019s Ark Lab;;;", + "aff_unique_url": "http://www.uestc.edu.cn;https://www.huawei.com;;https://www.huawei.com/en/ai/noahs-ark-lab;http://en.hhit.edu.cn/", + "aff_unique_abbr": "UESTC;Huawei;;HNA Lab;HIT", + "aff_campus_unique_index": "0;2;2;0;2;2;2+2", + "aff_campus_unique": "Chengdu;;Shenzhen", + "aff_country_unique_index": "0;0+0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.425", + "title": "Leveraging Open Data and Task Augmentation to Automated Behavioral Coding of Psychotherapy Conversations in Low-Resource Scenarios", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In psychotherapy interactions, the quality of a session is assessed by codifying the communicative behaviors of 
participants during the conversation through manual observation and annotation. Developing computational approaches for automated behavioral coding can reduce the burden on human coders and facilitate the objective evaluation of the intervention. In the real world, however, implementing such algorithms is associated with data sparsity challenges since privacy concerns lead to limited available in-domain data. In this paper, we leverage a publicly available conversation-based dataset and transfer knowledge to the low-resource behavioral coding task by performing an intermediate language model training via meta-learning. We introduce a task augmentation method to produce a large number of \u201canalogy tasks\u201d \u2014 tasks similar to the target one \u2014 and demonstrate that the proposed framework predicts target behaviors more accurately than all the other baseline models.", + "author": "Zhuohao Chen; Nikolaos Flemotomos; Zac Imel; David Atkins; Shrikanth Narayanan", + "authorids": "/z/zhuohao-chen/; /n/nikolaos-flemotomos/; /z/zac-imel/; /d/david-atkins/; /s/shrikanth-narayanan/", + "bibtex": "@inproceedings{chen-etal-2022-leveraging-open,\n title = \"Leveraging Open Data and Task Augmentation to Automated Behavioral Coding of Psychotherapy Conversations in Low-Resource Scenarios\",\n author = \"Chen, Zhuohao and\n Flemotomos, Nikolaos and\n Imel, Zac and\n Atkins, David and\n Narayanan, Shrikanth\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.425/\",\n doi = \"10.18653/v1/2022.findings-emnlp.425\",\n pages = \"5787--5795\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.425.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.425/", + "pdf_size": 292380, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=604365886299485060&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Southern California; University of Southern California+Apple Inc.; University of Utah; University of Washington; University of Southern California", + "aff_domain": "sail.usc.edu; ;utah.edu;u.washington.edu; ", + "email": "sail.usc.edu; ;utah.edu;u.washington.edu; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;2;3;0", + "aff_unique_norm": "University of Southern California;Apple Inc.;University of Utah;University of Washington", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.usc.edu;https://www.apple.com;https://www.utah.edu;https://www.washington.edu", + "aff_unique_abbr": "USC;Apple;Utah;UW", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.660", + "title": "Leveraging QA Datasets to Improve Generative Data Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The ability of generative language models (GLMs) to generate text has improved considerably in the last few years, enabling their use for generative data augmentation. In this work, we propose CONDA, an approach to further improve GLM\u2019s ability to generate synthetic data by reformulating data generation as context generation for a given question-answer (QA) pair and leveraging QA datasets for training context generators. Then, we cast downstream tasks into the same question answering format and adapt the fine-tuned context generators to the target task domain. Finally, we use the fine-tuned GLM to generate relevant contexts, which are in turn used as synthetic training data for their corresponding tasks. 
We perform extensive experiments on multiple classification datasets and demonstrate substantial improvements in performance for both few- and zero-shot settings. Our analysis reveals that QA datasets that require high-level reasoning abilities (e.g., abstractive and common-sense QA datasets) tend to give the best boost in performance in both few-shot and zero-shot settings.", + "author": "Dheeraj Mekala; Tu Vu; Timo Schick; Jingbo Shang", + "authorids": "/d/dheeraj-mekala/; /t/tu-vu/; /t/timo-schick/; /j/jingbo-shang/", + "bibtex": "@inproceedings{mekala-etal-2022-leveraging,\n title = \"Leveraging {QA} Datasets to Improve Generative Data Augmentation\",\n author = \"Mekala, Dheeraj and\n Vu, Tu and\n Schick, Timo and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.660/\",\n doi = \"10.18653/v1/2022.emnlp-main.660\",\n pages = \"9737--9750\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.660.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.660/", + "pdf_size": 488439, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4120021172693159180&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of California San Diego\u2662; University of Massachusetts Amherst\u2660; Meta AI Research\u2663; Hal\u0131c\u0131o \u02d8glu Data Science Institute, University of California San Diego\u2661", + "aff_domain": "ucsd.edu;ucsd.edu;cs.umass.edu;fb.com", + "email": "ucsd.edu;ucsd.edu;cs.umass.edu;fb.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of California, San Diego;University of 
Massachusetts Amherst;Meta Platforms, Inc.;University of California San Diego", + "aff_unique_dep": ";;Meta AI Research;Hal\u0131c\u0131o\u011flu Data Science Institute", + "aff_unique_url": "https://ucsd.edu;https://www.umass.edu;https://meta.com;https://ucsd.edu", + "aff_unique_abbr": "UCSD;UMass Amherst;Meta AI;UCSD", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "San Diego;Amherst;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.350", + "title": "Leveraging Training Dynamics and Self-Training for Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The effectiveness of pre-trained language models in downstream tasks is highly dependent on the amount of labeled data available for training. Semi-supervised learning (SSL) is a promising technique that has seen wide attention recently due to its effectiveness in improving deep learning models when training data is scarce. Common approaches employ a teacher-student self-training framework, where a teacher network generates pseudo-labels for unlabeled data, which are then used to iteratively train a student network. In this paper, we propose a new self-training approach for text classification that leverages training dynamics of unlabeled data. We evaluate our approach on a wide range of text classification tasks, including emotion detection, sentiment analysis, question classification and gramaticality, which span a variety of domains, e.g, Reddit, Twitter, and online forums. 
Notably, our method is successful on all benchmarks, obtaining an average increase in F1 score of 3.5% over strong baselines in low resource settings.", + "author": "Tiberiu Sosea; Cornelia Caragea", + "authorids": "/t/tiberiu-sosea/; /c/cornelia-caragea/", + "bibtex": "@inproceedings{sosea-caragea-2022-leveraging,\n title = \"Leveraging Training Dynamics and Self-Training for Text Classification\",\n author = \"Sosea, Tiberiu and\n Caragea, Cornelia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.350/\",\n doi = \"10.18653/v1/2022.findings-emnlp.350\",\n pages = \"4750--4762\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.350.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.350/", + "pdf_size": 347413, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12001546934758228024&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Computer Science, University of Illinois Chicago; Computer Science, University of Illinois Chicago", + "aff_domain": "uic.edu;uic.edu", + "email": "uic.edu;uic.edu", + "github": "https://github.com/tsosea2/AUM-ST", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.519", + "title": "Lexi: Self-Supervised Learning of the UI Language", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Humans can learn to operate the user interface (UI) of an application by reading an instruction manual or how-to guide. Along with text, these resources include visual content such as UI screenshots and images of application icons referenced in the text. We explore how to leverage this data to learn generic visio-linguistic representations of UI screens and their components. These representations are useful in many real applications, such as accessibility, voice navigation, and task automation. Prior UI representation models rely on UI metadata (UI trees and accessibility labels), which is often missing, incompletely defined, or not accessible. We avoid such a dependency, and propose Lexi, a pre-trained vision and language model designed to handle the unique features of UI screens, including their text richness and context sensitivity. To train Lexi we curate the UICaption dataset consisting of 114k UI images paired with descriptions of their functionality. We evaluate Lexi on four tasks: UI action entailment, instruction-based UI image retrieval, grounding referring expressions, and UI entity recognition.", + "author": "Pratyay Banerjee; Shweti Mahajan; Kushal Arora; Chitta Baral; Oriana Riva", + "authorids": "/p/pratyay-banerjee/; /s/shweti-mahajan/; /k/kushal-arora/; /c/chitta-baral/; /o/oriana-riva/", + "bibtex": "@inproceedings{banerjee-etal-2022-lexi,\n title = \"{L}exi: Self-Supervised Learning of the {UI} Language\",\n author = \"Banerjee, Pratyay and\n Mahajan, Shweti and\n Arora, Kushal and\n Baral, Chitta and\n Riva, Oriana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.519/\",\n doi = \"10.18653/v1/2022.findings-emnlp.519\",\n 
pages = \"6992--7007\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.519.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.519/", + "pdf_size": 3853320, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4318134942869211025&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Arizona State University; Microsoft Research; McGill University; Arizona State University; Microsoft Research", + "aff_domain": "asu.edu;msr.com;mcgill.ca;asu.edu;msr.com", + "email": "asu.edu;msr.com;mcgill.ca;asu.edu;msr.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;1", + "aff_unique_norm": "Arizona State University;Microsoft Corporation;McGill University", + "aff_unique_dep": ";Microsoft Research;", + "aff_unique_url": "https://www.asu.edu;https://www.microsoft.com/en-us/research;https://www.mcgill.ca", + "aff_unique_abbr": "ASU;MSR;McGill", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.findings-emnlp.257", + "title": "Lexical Entailment with Hierarchy Representations by Deep Metric Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we introduce a novel method for lexical entailment tasks, which detects a hyponym-hypernym relation among words. Existing lexical entailment studies are lacking in generalization performance, as they cannot be applied to words that are not included in the training dataset. Moreover, existing work evaluates the performance by using the dataset that contains words used for training. This study proposes a method that learns a mapping from word embeddings to the hierarchical embeddings in order to predict the hypernymy relations of any input words. 
To validate the generalization performance, we conduct experiments using a train dataset that does not overlap with the evaluation dataset. As a result, our method achieved state-of-the-art performance and showed robustness for unknown words.", + "author": "Naomi Sato; Masaru Isonuma; Kimitaka Asatani; Shoya Ishizuka; Aori Shimizu; Ichiro Sakata", + "authorids": "/n/naomi-sato/; /m/masaru-isonuma/; /k/kimitaka-asatani/; /s/shoya-ishizuka/; /a/aori-shimizu/; /i/ichiro-sakata/", + "bibtex": "@inproceedings{sato-etal-2022-lexical,\n title = \"Lexical Entailment with Hierarchy Representations by Deep Metric Learning\",\n author = \"Sato, Naomi and\n Isonuma, Masaru and\n Asatani, Kimitaka and\n Ishizuka, Shoya and\n Shimizu, Aori and\n Sakata, Ichiro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.257/\",\n doi = \"10.18653/v1/2022.findings-emnlp.257\",\n pages = \"3517--3522\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.257.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.257/", + "pdf_size": 473676, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1839457431698962388&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "The University of Tokyo; The University of Tokyo; The University of Tokyo; Daikin Industries Ltd.; Daikin Industries Ltd.; The University of Tokyo", + "aff_domain": "ipr-ctr.t.u-tokyo.ac.jp;ipr-ctr.t.u-tokyo.ac.jp;ipr-ctr.t.u-tokyo.ac.jp;daikin.co.jp;daikin.co.jp;ipr-ctr.t.u-tokyo.ac.jp", + "email": "ipr-ctr.t.u-tokyo.ac.jp;ipr-ctr.t.u-tokyo.ac.jp;ipr-ctr.t.u-tokyo.ac.jp;daikin.co.jp;daikin.co.jp;ipr-ctr.t.u-tokyo.ac.jp", + "github": "", + "project": "", + 
"author_num": 6, + "aff_unique_index": "0;0;0;1;1;0", + "aff_unique_norm": "University of Tokyo;Daikin Industries", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.daikin.com", + "aff_unique_abbr": "UTokyo;Daikin", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.findings-emnlp.323", + "title": "Lexical Generalization Improves with Larger Models and Longer Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While fine-tuned language models perform well on many language tasks, they were also shown to rely on superficial surface features such as lexical overlap. Excessive utilization of such heuristics can lead to failure on challenging inputs. We analyze the use of lexical overlap heuristics in natural language inference, paraphrase detection, and reading comprehension (using a novel contrastive dataset),and find that larger models are much less susceptible to adopting lexical overlap heuristics. We also find that longer training leads models to abandon lexical overlap heuristics. 
Finally, We provide evidence that the disparity between models size has its source in the pre-trained model.", + "author": "Elron Bandel; Yoav Goldberg; Yanai Elazar", + "authorids": "/e/elron-bandel/; /y/yoav-goldberg/; /y/yanai-elazar/", + "bibtex": "@inproceedings{bandel-etal-2022-lexical,\n title = \"Lexical Generalization Improves with Larger Models and Longer Training\",\n author = \"Bandel, Elron and\n Goldberg, Yoav and\n Elazar, Yanai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.323/\",\n doi = \"10.18653/v1/2022.findings-emnlp.323\",\n pages = \"4398--4410\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.323.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.323/", + "pdf_size": 950979, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9547809195302401184&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": "Computer Science Department, Bar Ilan University + IBM Research; Computer Science Department, Bar Ilan University + Allen Institute for Artificial Intelligence; Paul G. Allen School of Computer Science and Engineering, University of Washington + Allen Institute for Artificial Intelligence", + "aff_domain": "gmail.com; ; ", + "email": "gmail.com; ; ", + "github": "https://github.com/elronbandel/lexical-generalization", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+2;3+2", + "aff_unique_norm": "Bar Ilan University;IBM;Allen Institute for Artificial Intelligence;University of Washington", + "aff_unique_dep": "Computer Science Department;IBM Research;;Paul G. 
Allen School of Computer Science and Engineering", + "aff_unique_url": "https://www.biu.ac.il;https://www.ibm.com/research;https://allenai.org;https://www.cs.washington.edu", + "aff_unique_abbr": "BIU;IBM;AI2;UW", + "aff_campus_unique_index": ";;1", + "aff_campus_unique": ";Seattle", + "aff_country_unique_index": "0+1;0+1;1+1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.findings-emnlp.31", + "title": "Lexicon-Enhanced Self-Supervised Training for Multilingual Dense Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent multilingual pre-trained models have shown better performance in various multilingual tasks. However, these models perform poorly on multilingual retrieval tasks due to lacking multilingual training data. In this paper, we propose to mine and generate self-supervised training data based on a large-scale unlabeled corpus. We carefully design a mining method which combines the sparse and dense models to mine the relevance of unlabeled queries and passages. And we introduce a query generator to generate more queries in target languages for unlabeled passages. Through extensive experiments on Mr. TYDI dataset and an industrial dataset from a commercial search engine, we demonstrate that our method performs better than baselines based on various pre-trained multilingual models. 
Our method even achieves on-par performance with the supervised method on the latter dataset.", + "author": "Houxing Ren; Linjun Shou; Jian Pei; Ning Wu; Ming Gong; Daxin Jiang", + "authorids": "/h/houxing-ren/; /l/linjun-shou/; /j/jian-pei/; /n/ning-wu/; /m/ming-gong/; /d/daxin-jiang/", + "bibtex": "@inproceedings{ren-etal-2022-lexicon,\n title = \"Lexicon-Enhanced Self-Supervised Training for Multilingual Dense Retrieval\",\n author = \"Ren, Houxing and\n Shou, Linjun and\n Pei, Jian and\n Wu, Ning and\n Gong, Ming and\n Jiang, Daxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.31/\",\n doi = \"10.18653/v1/2022.findings-emnlp.31\",\n pages = \"444--459\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.31.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.31/", + "pdf_size": 443532, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4248491750620126934&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science and Engineering, Beihang University; Microsoft STC Asia; Duke University, Durham, NC, USA 27705; Microsoft STC Asia; Microsoft STC Asia; Microsoft STC Asia", + "aff_domain": "buaa.edu.cn;microsoft.com;duke.edu;microsoft.com;microsoft.com;microsoft.com", + "email": "buaa.edu.cn;microsoft.com;duke.edu;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;1;1;1", + "aff_unique_norm": "Beihang University;Microsoft;Duke University", + "aff_unique_dep": "School of Computer Science and Engineering;STC;", + "aff_unique_url": 
"http://www.buaa.edu.cn;https://www.microsoft.com;https://www.duke.edu", + "aff_unique_abbr": "BUAA;MS;Duke", + "aff_campus_unique_index": "1;2;1;1;1", + "aff_campus_unique": ";Asia;Durham", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.232", + "title": "Life is a Circus and We are the Clowns: Automatically Finding Analogies between Situations and Processes", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Analogy-making gives rise to reasoning, abstraction, flexible categorization and counterfactual inference \u2013 abilities lacking in even the best AI systems today. Much research has suggested that analogies are key to non-brittle systems that can adapt to new domains. Despite their importance, analogies received little attention in the NLP community, with most research focusing on simple word analogies. Work that tackled more complex analogies relied heavily on manually constructed, hard-to-scale input representations.In this work, we explore a more realistic, challenging setup: our input is a pair of natural language procedural texts, describing a situation or a process (e.g., how the heart works/how a pump works). Our goal is to automatically extract entities and their relations from the text and find a mapping between the different domains based on relational similarity (e.g., blood is mapped to water). We develop an interpretable, scalable algorithm and demonstrate that it identifies the correct mappings 87% of the time for procedural texts and 94% for stories from cognitive-psychology literature. We show it can extract analogies from a large dataset of procedural texts, achieving 79% precision (analogy prevalence in data: 3%). 
Lastly, we demonstrate that our algorithm is robust to paraphrasing the input texts", + "author": "Oren Sultan; Dafna Shahaf", + "authorids": "/o/oren-sultan/; /d/dafna-shahaf/", + "bibtex": "@inproceedings{sultan-shahaf-2022-life,\n title = \"Life is a Circus and We are the Clowns: Automatically Finding Analogies between Situations and Processes\",\n author = \"Sultan, Oren and\n Shahaf, Dafna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.232/\",\n doi = \"10.18653/v1/2022.emnlp-main.232\",\n pages = \"3547--3562\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.232.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.232/", + "pdf_size": 1589577, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5395393244847009955&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "The Hebrew University of Jerusalem; The Hebrew University of Jerusalem", + "aff_domain": "mail.huji.ac.il;cs.huji.ac.il", + "email": "mail.huji.ac.il;cs.huji.ac.il", + "github": "https://github.com/orensul/analogies_mining", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The Hebrew University of Jerusalem", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huji.ac.il", + "aff_unique_abbr": "HUJI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.52", + "title": "LightEA: A Scalable, Robust, and Interpretable Entity Alignment Framework via Three-view Label Propagation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entity Alignment 
(EA) aims to find equivalent entity pairs between KGs, which is the core step to bridging and integrating multi-source KGs. In this paper, we argue that existing complex EA methods inevitably inherit the inborn defects from their neural network lineage: poor interpretability and weak scalability. Inspired by recent studies, we reinvent the classical Label Propagation algorithm to effectively run on KGs and propose a neural-free EA framework \u2014 LightEA, consisting of three efficient components: (i) Random Orthogonal Label Generation, (ii) Three-view Label Propagation, and (iii) Sparse Sinkhorn Operation.According to the extensive experiments on public datasets, LightEA has impressive scalability, robustness, and interpretability. With a mere tenth of time consumption, LightEA achieves comparable results to state-of-the-art methods across all datasets and even surpasses them on many. Besides, due to the computational process of LightEA being entirely linear, we could trace the propagation process at each step and clearly explain how the entities are aligned.", + "author": "Xin Mao; Wenting Wang; Yuanbin Wu; Man Lan", + "authorids": "/x/xinnian-mao/; /w/wenting-wang/; /y/yuanbin-wu/; /m/man-lan/", + "bibtex": "@inproceedings{mao-etal-2022-lightea,\n title = \"{L}ight{EA}: A Scalable, Robust, and Interpretable Entity Alignment Framework via Three-view Label Propagation\",\n author = \"Mao, Xin and\n Wang, Wenting and\n Wu, Yuanbin and\n Lan, Man\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.52/\",\n doi = \"10.18653/v1/2022.emnlp-main.52\",\n pages = \"825--838\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.52.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.52/", + "pdf_size": 1351006, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10014511097141979458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Technology, East China Normal University + Shanghai Institute of AI for Education, East China Normal University; TikTok Group, Singapore; School of Computer Science and Technology, East China Normal University + Shanghai Institute of AI for Education, East China Normal University; School of Computer Science and Technology, East China Normal University + Shanghai Institute of AI for Education, East China Normal University", + "aff_domain": "stu.ecnu.edu.cn;bytedance.com;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "email": "stu.ecnu.edu.cn;bytedance.com;cs.ecnu.edu.cn;cs.ecnu.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;1;0+0;0+0", + "aff_unique_norm": "East China Normal University;TikTok Group", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.tiktok.com", + "aff_unique_abbr": "ECNU;TikTok", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;1;0+0;0+0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.24", + "title": "Linearizing Transformer with Key-Value Memory", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Efficient transformer variants with linear time complexity have been developed to mitigate the quadratic computational overhead of the vanilla transformer. Among them are low-rank projection methods such as Linformer and kernel-based Transformers. Despite their unique merits, they usually suffer from a performance drop comparing with the vanilla transformer on many sequence generation tasks, and often fail to obtain computation gain when the generation is short. 
We propose Memsizer, an approach towards closing the performance gap while improving the efficiency even with short generation. It projects the source sequences into lower dimension representations like Linformer, while enjoying efficient recurrent-style incremental computation similar to kernel-based transformers. This yields linear computation time and constant memory complexity at inference time. Memsizer also employs a lightweight multi-head mechanism which renders the computation as light as a single-head model. We demonstrate that Memsizer provides an improved balance between efficiency and accuracy over the vanilla transformer and other efficient transformer variants in three typical sequence generation tasks, including machine translation, abstractive text summarization, and language modeling.", + "author": "Yizhe Zhang; Deng Cai", + "authorids": "/y/yizhe-zhang/; /d/deng-cai/", + "bibtex": "@inproceedings{zhang-cai-2022-linearizing,\n title = \"Linearizing Transformer with Key-Value Memory\",\n author = \"Zhang, Yizhe and\n Cai, Deng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.24/\",\n doi = \"10.18653/v1/2022.emnlp-main.24\",\n pages = \"346--359\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.24.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.24/", + "pdf_size": 936708, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=415969859763089781&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff": "Meta AI\u2020; The Chinese University of Hong Kong", + "aff_domain": "hotmail.com;gmail.com", + "email": "hotmail.com;gmail.com", + "github": 
"https://github.com/jcyk/memsizer", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Meta Platforms, Inc.;The Chinese University of Hong Kong", + "aff_unique_dep": "Meta AI;", + "aff_unique_url": "https://meta.com;https://www.cuhk.edu.hk", + "aff_unique_abbr": "Meta AI;CUHK", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.121", + "title": "Linguistic Corpus Annotation for Automatic Text Simplification Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Evaluating automatic text simplification (ATS) systems is a difficult task that is either performed by automatic metrics or user-based evaluations. However, from a linguistic point-of-view, it is not always clear on what bases these evaluations operate. In this paper, we propose annotations of the ASSET corpus that can be used to shed more light on ATS evaluation. In addition to contributing with this resource, we show how it can be used to analyze SARI\u2019s behavior and to re-evaluate existing ATS systems. 
We present our insights as a step to improve ATS evaluation protocols in the future.", + "author": "R\u00e9mi Cardon; Adrien Bibal; Rodrigo Wilkens; David Alfter; Magali Norr\u00e9; Adeline M\u00fcller; Watrin Patrick; Thomas Fran\u00e7ois", + "authorids": "/r/remi-cardon/; /a/adrien-bibal/; /r/rodrigo-wilkens/; /d/david-alfter/; /m/magali-norre/; /a/adeline-muller/; /w/watrin-patrick/; /t/thomas-francois/", + "bibtex": "@inproceedings{cardon-etal-2022-linguistic,\n title = \"Linguistic Corpus Annotation for Automatic Text Simplification Evaluation\",\n author = {Cardon, R{\\'e}mi and\n Bibal, Adrien and\n Wilkens, Rodrigo and\n Alfter, David and\n Norr{\\'e}, Magali and\n M{\\\"u}ller, Adeline and\n Patrick, Watrin and\n Fran{\\c{c}}ois, Thomas},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.121/\",\n doi = \"10.18653/v1/2022.emnlp-main.121\",\n pages = \"1842--1866\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.121.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.121/", + "pdf_size": 1275475, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7276434351026250779&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium; CENTAL, IL&C, University of Louvain, Belgium", + "aff_domain": 
"uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be", + "email": "uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be;uclouvain.be", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "University of Louvain", + "aff_unique_dep": "CENTAL, IL&C", + "aff_unique_url": "https://www.uclouvain.be", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "Belgium" + }, + { + "id": "2022.findings-emnlp.40", + "title": "Linguistic Rules-Based Corpus Generation for Native Chinese Grammatical Error Correction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Chinese Grammatical Error Correction (CGEC) is both a challenging NLP task and a common application in human daily life. Recently, many data-driven approaches are proposed for the development of CGEC research. However, there are two major limitations in the CGEC field: First, the lack of high-quality annotated training corpora prevents the performance of existing CGEC models from being significantly improved. Second, the grammatical errors in widely used test sets are not made by native Chinese speakers, resulting in a significant gap between the CGEC models and the real application. In this paper, we propose a linguistic rules-based approach to construct large-scale CGEC training corpora with automatically generated grammatical errors. Additionally, we present a challenging CGEC benchmark derived entirely from errors made by native Chinese speakers in real-world scenarios. 
Extensive experiments and detailed analyses not only demonstrate that the training data constructed by our method effectively improves the performance of CGEC models, but also reflect that our benchmark is an excellent resource for further development of the CGEC field.", + "author": "Shirong Ma; Yinghui Li; Rongyi Sun; Qingyu Zhou; Shulin Huang; Ding Zhang; Li Yangning; Ruiyang Liu; Zhongli Li; Yunbo Cao; Haitao Zheng; Ying Shen", + "authorids": "/s/shirong-ma/; /y/yinghui-li/; /r/rongyi-sun/; /q/qingyu-zhou/; /s/shulin-huang/; /d/ding-zhang/; /l/li-yangning/; /r/ruiyang-liu/; /z/zhongli-li/; /y/yunbo-cao/; /h/haitao-zheng/; /y/ying-shen/", + "bibtex": "@inproceedings{ma-etal-2022-linguistic,\n title = \"Linguistic Rules-Based Corpus Generation for Native {C}hinese Grammatical Error Correction\",\n author = \"Ma, Shirong and\n Li, Yinghui and\n Sun, Rongyi and\n Zhou, Qingyu and\n Huang, Shulin and\n Zhang, Ding and\n Yangning, Li and\n Liu, Ruiyang and\n Li, Zhongli and\n Cao, Yunbo and\n Zheng, Haitao and\n Shen, Ying\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.40/\",\n doi = \"10.18653/v1/2022.findings-emnlp.40\",\n pages = \"576--589\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.40.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.40/", + "pdf_size": 4231294, + "gs_citation": 46, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2106376443926374794&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, 
Tsinghua University; Tencent Cloud Xiaowei; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Tsinghua Shenzhen International Graduate School, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Tencent Cloud Xiaowei; Tencent Cloud Xiaowei; Tsinghua Shenzhen International Graduate School, Tsinghua University+Peng Cheng Laboratory; School of Intelligent Systems Engineering, Sun-Yat Sen University", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ; ; ;sz.tsinghua.edu.cn;mail.sysu.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ; ; ;sz.tsinghua.edu.cn;mail.sysu.edu.cn", + "github": "https://github.com/masr2000/CLG-CGEC", + "project": "", + "author_num": 12, + "aff_unique_index": "0;0;0;1;0;0;0;0;1;1;0+2;3", + "aff_unique_norm": "Tsinghua University;Tencent;Peng Cheng Laboratory;Sun Yat-sen University", + "aff_unique_dep": "International Graduate School;Tencent Cloud Xiaowei;;School of Intelligent Systems Engineering", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://cloud.tencent.com;http://www.pcl.ac.cn;http://www.sysu.edu.cn/", + "aff_unique_abbr": "THU;Tencent;PCL;SYSU", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.545", + "title": "LiteVL: Efficient Video-Language Learning with Enhanced Spatial-Temporal Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent large-scale video-language pre-trained models have shown appealing performance on various downstream tasks. However, the pre-training process is computationally expensive due to the requirement of millions of video-text pairs and the redundant data structure of each video. 
To mitigate these problems, we propose LiteVL, which adapts a pre-trained image-language model BLIP into a video-text model directly on downstream tasks, without heavy pre-training. To enhance the temporal modeling lacking in the image-language model, we propose to add temporal attention modules in the image encoder of BLIP with dynamic temporal scaling. Besides the model-wise adaptation, we also propose a non-parametric pooling mechanism to adaptively reweight the fine-grained video embedding conditioned on the text. Experimental results on text-video retrieval and video question answering show that the proposed LiteVL even outperforms previous video-language pre-trained models by a clear margin, though without any video-language pre-training.", + "author": "Dongsheng Chen; Chaofan Tao; Lu Hou; Lifeng Shang; Xin Jiang; Qun Liu", + "authorids": "/d/dongsheng-chen/; /c/chaofan-tao/; /l/lu-hou/; /l/lifeng-shang/; /x/xin-jiang/; /q/qun-liu/", + "bibtex": "@inproceedings{chen-etal-2022-litevl,\n title = \"{L}ite{VL}: Efficient Video-Language Learning with Enhanced Spatial-Temporal Modeling\",\n author = \"Chen, Dongsheng and\n Tao, Chaofan and\n Hou, Lu and\n Shang, Lifeng and\n Jiang, Xin and\n Liu, Qun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.545/\",\n doi = \"10.18653/v1/2022.emnlp-main.545\",\n pages = \"7985--7997\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.545.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.545/", + "pdf_size": 1640703, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=618712483852188473&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Peking 
University; The University of Hong Kong; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "aff_domain": "stu.pku.edu.cn;connect.hku.hk;huawei.com;huawei.com;huawei.com;huawei.com", + "email": "stu.pku.edu.cn;connect.hku.hk;huawei.com;huawei.com;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;2;2;2", + "aff_unique_norm": "Peking University;The University of Hong Kong;Huawei", + "aff_unique_dep": ";;Noah\u2019s Ark Lab", + "aff_unique_url": "http://www.pku.edu.cn;https://www.hku.hk;https://www.huawei.com", + "aff_unique_abbr": "Peking U;HKU;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.352", + "title": "LittleBird: Efficient Faster & Longer Transformer for Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "BERT has shown a lot of sucess in a wide variety of NLP tasks. But it has a limitation dealing with long inputs due to its attention mechanism. Longformer, ETC and BigBird addressed this issue and effectively solved the quadratic dependency problem.However we find that these models are not sufficient, and propose LittleBird, a novel model based on BigBird with improved speed and memory footprint while maintaining accuracy.In particular, we devise a more flexible and efficient position representation method based on Attention with Linear Biases(ALiBi). We also show that replacing the method of global information represented in the BigBird with pack and unpack attention is more effective.The proposed model can work on long inputs even after being pre-trained on short inputs, and can be trained efficiently reusing existing pre-trained language model for short inputs. 
This is a significant benefit for low-resource languages where large amounts of long text data are difficult to obtain.As a result, our experiments show that LittleBird works very well in a variety of languages, achieving high performance in question answering tasks, particularly in KorQuAD2.0, Korean Question Answering Dataset for long paragraphs.", + "author": "Minchul Lee; Kijong Han; Myeong Cheol Shin", + "authorids": "/m/minchul-lee/; /k/kijong-han/; /m/myeong-cheol-shin/", + "bibtex": "@inproceedings{lee-etal-2022-littlebird,\n title = \"{L}ittle{B}ird: Efficient Faster {\\&} Longer Transformer for Question Answering\",\n author = \"Lee, Minchul and\n Han, Kijong and\n Shin, Myeong Cheol\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.352/\",\n doi = \"10.18653/v1/2022.emnlp-main.352\",\n pages = \"5261--5277\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.352.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.352/", + "pdf_size": 5893126, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6932897525098386938&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Kakao Enterprise Corp., South Korea; Kakao Enterprise Corp., South Korea; Kakao Enterprise Corp., South Korea", + "aff_domain": "kakaoenterprise.com;kakaoenterprise.com;kakaoenterprise.com", + "email": "kakaoenterprise.com;kakaoenterprise.com;kakaoenterprise.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Kakao Enterprise Corp.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.kakaoenterprisecorp.com", + "aff_unique_abbr": "KEC", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.265", + "title": "LogicNMR: Probing the Non-monotonic Reasoning Ability of Pre-trained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The logical reasoning capabilities of pre-trained language models have recently received much attention. As one of the vital reasoning paradigms, non-monotonic reasoning refers to the fact that conclusions may be invalidated with new information. Existing work has constructed a non-monotonic inference dataset \ud835\udeff-NLI and explored the performance of language models on it. However, the \ud835\udeff-NLI dataset is entangled with commonsense reasoning. In this paper, we explore the pure non-monotonic reasoning ability of pre-trained language models. We build a non-monotonic reasoning benchmark, named LogicNMR, with explicit default rules and iterative updates. In the experimental part, the performance of popular language models on LogicNMR is explored from the perspectives of accuracy, generalization, proof-based traceability and robustness. 
The experimental results show that even though the fine-tuned language models achieve an accuracy of more than 94.4% on LogicNMR, they perform unsatisfactorily, with a significant drop, in generalization and proof-based traceability.", + "author": "Yeliang Xiu; Zhanhao Xiao; Yongmei Liu", + "authorids": "/y/yeliang-xiu/; /z/zhanhao-xiao/; /y/yongmei-liu/", + "bibtex": "@inproceedings{xiu-etal-2022-logicnmr,\n title = \"{L}ogic{NMR}: Probing the Non-monotonic Reasoning Ability of Pre-trained Language Models\",\n author = \"Xiu, Yeliang and\n Xiao, Zhanhao and\n Liu, Yongmei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.265/\",\n doi = \"10.18653/v1/2022.findings-emnlp.265\",\n pages = \"3616--3626\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.265.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.265/", + "pdf_size": 1123142, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=834239794703682003&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Dept. of Computer Science, Sun Yat-sen University; School of Computer Science, Guangdong Polytechnic Normal University; Dept. of Computer Science, Sun Yat-sen University", + "aff_domain": "mail2.sysu.edu.cn;gpnu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;gpnu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Sun Yat-sen University;Guangdong Polytechnic Normal University", + "aff_unique_dep": "Dept. 
of Computer Science;School of Computer Science", + "aff_unique_url": "http://www.sysu.edu.cn;http://www.gdpu.edu.cn", + "aff_unique_abbr": "SYSU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.1", + "title": "LogicSolver: Towards Interpretable Math Word Problem Solving with Logical Prompt-enhanced Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, deep learning models have made great progress in MWP solving on answer accuracy. However, they are uninterpretable since they mainly rely on shallow heuristics to achieve high performance without understanding and reasoning the grounded math logic. To address this issue and make a step towards interpretable MWP solving, we first construct a high-quality MWP dataset named InterMWP which consists of 11,495 MWPs and annotates interpretable logical formulas based on algebraic knowledge as the grounded linguistic logic of each solution equation. Different from existing MWP datasets, our InterMWP benchmark asks for a solver to not only output the solution expressions but also predict the corresponding logical formulas. We further propose a novel approach with logical prompt and interpretation generation, called LogicSolver. For each MWP, our LogicSolver first retrieves some highly-correlated algebraic knowledge and then passes them to the backbone model as prompts to improve the semantic representations of MWPs. With these improved semantic representations, our LogicSolver generates corresponding solution expressions and interpretable knowledge formulas in accord with the generated solution expressions, simultaneously. Experimental results show that our LogicSolver has stronger logical formula-based interpretability than baselines while achieving higher answer accuracy with the help of logical prompts, simultaneously. 
The source code and dataset will be available at https://github.com/yangzhch6/InterMWP.", + "author": "Zhicheng Yang; Jinghui Qin; Jiaqi Chen; Liang Lin; Xiaodan Liang", + "authorids": "/z/zhicheng-yang/; /j/jinghui-qin/; /j/jiaqi-chen/; /l/liang-lin/; /x/xiaodan-liang/", + "bibtex": "@inproceedings{yang-etal-2022-logicsolver,\n title = \"{L}ogic{S}olver: Towards Interpretable Math Word Problem Solving with Logical Prompt-enhanced Learning\",\n author = \"Yang, Zhicheng and\n Qin, Jinghui and\n Chen, Jiaqi and\n Lin, Liang and\n Liang, Xiaodan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.1/\",\n doi = \"10.18653/v1/2022.findings-emnlp.1\",\n pages = \"1--13\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.1.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.1/", + "pdf_size": 1245078, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12271104294962391630&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "Shenzhen Campus of Sun Yat-sen University+Sun Yat-sen University; Guangdong University of Technology; Sun Yat-sen University+Dark Matter AI Inc.; Sun Yat-sen University; Shenzhen Campus of Sun Yat-sen University+Sun Yat-sen University", + "aff_domain": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;gmail.com;ieee.org;gmail.com", + "email": "mail2.sysu.edu.cn;mail2.sysu.edu.cn;gmail.com;ieee.org;gmail.com", + "github": "https://github.com/yangzhch6/InterMWP", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;0+2;0;0+0", + "aff_unique_norm": "Sun Yat-sen University;Guangdong University of Technology;Dark Matter AI Inc.", + "aff_unique_dep": ";;", + "aff_unique_url": 
"http://www.sysu.edu.cn/;http://www.gdut.edu.cn;", + "aff_unique_abbr": "SYSU;GDUT;", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0+1;0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.532", + "title": "Logical Fallacy Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Reasoning is central to human intelligence. However, fallacious arguments are common, and some exacerbate problems such as spreading misinformation about climate change. In this paper, we propose the task of logical fallacy detection, and provide a new dataset (Logic) of logical fallacies generally found in text, together with an additional challenge set for detecting logical fallacies in climate change claims (LogicClimate). Detecting logical fallacies is a hard problem as the model must understand the underlying logical structure of the argument. We find that existing pretrained large language models perform poorly on this task. In contrast, we show that a simple structure-aware classifier outperforms the best language model by 5.46% F1 scores on Logic and 4.51% on LogicClimate. We encourage future work to explore this task since (a) it can serve as a new reasoning challenge for language models, and (b) it can have potential applications in tackling the spread of misinformation. 
Our dataset and code are available at https://github.com/causalNLP/logical-fallacy", + "author": "Zhijing Jin; Abhinav Lalwani; Tejas Vaidhya; Xiaoyu Shen; Yiwen Ding; Zhiheng Lyu; Mrinmaya Sachan; Rada Mihalcea; Bernhard Schoelkopf", + "authorids": "/z/zhijing-jin/; /a/abhinav-lalwani/; /t/tejas-vaidhya/; /x/xiaoyu-shen/; /y/yiwen-ding/; /z/zhiheng-lyu/; /m/mrinmaya-sachan/; /r/rada-mihalcea/; /b/bernhard-schoelkopf/", + "bibtex": "@inproceedings{jin-etal-2022-logical,\n title = \"Logical Fallacy Detection\",\n author = \"Jin, Zhijing and\n Lalwani, Abhinav and\n Vaidhya, Tejas and\n Shen, Xiaoyu and\n Ding, Yiwen and\n Lyu, Zhiheng and\n Sachan, Mrinmaya and\n Mihalcea, Rada and\n Schoelkopf, Bernhard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.532/\",\n doi = \"10.18653/v1/2022.findings-emnlp.532\",\n pages = \"7180--7198\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.532.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.532/", + "pdf_size": 847734, + "gs_citation": 88, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8405949215174367548&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Max Planck Institute+ETH Z\u00fcrich; BITS Pilani; IIT Kharagpur; Saarland Informatics Campus; University of Michigan; University of Hong Kong+ETH Z\u00fcrich; ETH Z\u00fcrich; University of Michigan; Max Planck Institute+ETH Z\u00fcrich", + "aff_domain": "ethz.ch;gmail.com; ; ; ; ; ;umich.edu; ", + "email": "ethz.ch;gmail.com; ; ; ; ; ;umich.edu; ", + "github": "https://github.com/causalNLP/logical-fallacy", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;2;3;4;5;6+1;1;5;0+1", + 
"aff_unique_norm": "Max Planck Institute;ETH Z\u00fcrich;Birla Institute of Technology and Science, Pilani;Indian Institute of Technology Kharagpur;Saarland University;University of Michigan;University of Hong Kong", + "aff_unique_dep": ";;;;Department of Computer Science;;", + "aff_unique_url": "https://www.mpiwg-berlin.mpg.de;https://www.ethz.ch;https://www.bits-pilani.ac.in;https://www.iitkgp.ac.in;https://www.uni-saarland.de;https://www.umich.edu;https://www.hku.hk", + "aff_unique_abbr": "MPI;ETHZ;BITS Pilani;IIT KGP;Uni Saar;UM;HKU", + "aff_campus_unique_index": ";1;2;3;;", + "aff_campus_unique": ";Pilani;Kharagpur;Saarbr\u00fccken", + "aff_country_unique_index": "0+1;2;2;0;3;4+1;1;3;0+1", + "aff_country_unique": "Germany;Switzerland;India;United States;China" + }, + { + "id": "2022.emnlp-main.255", + "title": "Logical Neural Networks for Knowledge Base Completion with Embeddings & Rules", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge base completion (KBC) has benefitted greatly by learning explainable rules in an human-interpretable dialect such as first-order logic. Rule-based KBC has so far, mainly focussed on learning one of two types of rules: conjunction-of-disjunctions and disjunction-of-conjunctions. We qualitatively show, via examples, that one of these has an advantage over the other when it comes to achieving high quality KBC. To the best of our knowledge, we are the first to propose learning both kinds of rules within a common framework. To this end, we propose to utilize logical neural networks (LNN), a powerful neuro-symbolic AI framework that can express both kinds of rules and learn these end-to-end using gradient-based optimization. Our in-depth experiments show that our LNN-based approach to learning rules for KBC leads to roughly 10% relative improvements, if not more, over SotA rule-based KBC methods. 
Moreover, by showing how to combine our proposed methods with knowledge graph embeddings we further achieve an additional 7.5% relative improvement.", + "author": "Prithviraj Sen; Breno William Carvalho; Ibrahim Abdelaziz; Pavan Kapanipathi; Salim Roukos; Alexander Gray", + "authorids": "/p/prithviraj-sen/; /b/breno-william-carvalho/; /i/ibrahim-abdelaziz/; /p/pavan-kapanipathi/; /s/salim-roukos/; /a/alexander-gray/", + "bibtex": "@inproceedings{sen-etal-2022-logical,\n title = \"Logical Neural Networks for Knowledge Base Completion with Embeddings {\\&} Rules\",\n author = \"Sen, Prithviraj and\n Carvalho, Breno William and\n Abdelaziz, Ibrahim and\n Kapanipathi, Pavan and\n Roukos, Salim and\n Gray, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.255/\",\n doi = \"10.18653/v1/2022.emnlp-main.255\",\n pages = \"3863--3875\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.255.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.255/", + "pdf_size": 2213925, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17812012802488146242&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Amazon; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research", + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "Amazon.com, Inc.;IBM", + "aff_unique_dep": ";IBM Research", + "aff_unique_url": "https://www.amazon.com;https://www.ibm.com/research", + "aff_unique_abbr": "Amazon;IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.251", + "title": "Logical Reasoning with Span-Level Predictions for Interpretable and Robust NLI Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current Natural Language Inference (NLI) models achieve impressive results, sometimes outperforming humans when evaluating on in-distribution test sets. However, as these models are known to learn from annotation artefacts and dataset biases, it is unclear to what extent the models are learning the task of NLI instead of learning from shallow heuristics in their training data.We address this issue by introducing a logical reasoning framework for NLI, creating highly transparent model decisions that are based on logical rules. Unlike prior work, we show that improved interpretability can be achieved without decreasing the predictive accuracy. We almost fully retain performance on SNLI, while also identifying the exact hypothesis spans that are responsible for each model prediction.Using the e-SNLI human explanations, we verify that our model makes sensible decisions at a span level, despite not using any span labels during training. We can further improve model performance and the span-level decisions by using the e-SNLI explanations during training. Finally, our model is more robust in a reduced data setting. When training with only 1,000 examples, out-of-distribution performance improves on the MNLI matched and mismatched validation sets by 13% and 16% relative to the baseline. 
Training with fewer observations yields further improvements, both in-distribution and out-of-distribution.", + "author": "Joe Stacey; Pasquale Minervini; Haim Dubossarsky; Marek Rei", + "authorids": "/j/joe-stacey/; /p/pasquale-minervini/; /h/haim-dubossarsky/; /m/marek-rei/", + "bibtex": "@inproceedings{stacey-etal-2022-logical,\n title = \"Logical Reasoning with Span-Level Predictions for Interpretable and Robust {NLI} Models\",\n author = \"Stacey, Joe and\n Minervini, Pasquale and\n Dubossarsky, Haim and\n Rei, Marek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.251/\",\n doi = \"10.18653/v1/2022.emnlp-main.251\",\n pages = \"3809--3823\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.251.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.251/", + "pdf_size": 1519572, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9897345600628240444&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Imperial College London; University of Edinburgh + UCL; Queen Mary University of London; Imperial College London", + "aff_domain": "imperial.ac.uk;ed.ac.uk;qmul.ac.uk;imperial.ac.uk", + "email": "imperial.ac.uk;ed.ac.uk;qmul.ac.uk;imperial.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1+2;3;0", + "aff_unique_norm": "Imperial College London;University of Edinburgh;University College London;Queen Mary University of London", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.imperial.ac.uk;https://www.ed.ac.uk;https://www.ucl.ac.uk;https://www.qmul.ac.uk", + "aff_unique_abbr": "ICL;Edinburgh;UCL;QMUL", + "aff_campus_unique_index": ";1", + 
"aff_campus_unique": ";London", + "aff_country_unique_index": "0;0+0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.554", + "title": "Long Text Generation with Topic-aware Discrete Latent Variable Model", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Generating coherent long texts is an important yet challenging task, particularly forthe open-ended generation. Prior work based on discrete latent codes focuses on the modeling of discourse relation, resulting in discrete codes only learning shallow semantics (Ji and Huang, 2021). A natural text always revolves around several related topics and the transition across them is natural and smooth.In this work, we investigate whether discrete latent codes can learn information of topics. To this end, we build a topic-aware latent code-guided text generation model. To encourage discrete codes to model information about topics, we propose a span-level bag-of-words training objective for the model. 
Automatic and manual evaluation experiments show that our method can generate more topic-relevant and coherent texts.", + "author": "Erguang Yang; Mingtong Liu; Deyi Xiong; Yujie Zhang; Yufeng Chen; Jinan Xu", + "authorids": "/e/erguang-yang/; /m/mingtong-liu/; /d/deyi-xiong/; /y/yujie-zhang/; /y/yufeng-chen/; /j/jinan-xu/", + "bibtex": "@inproceedings{yang-etal-2022-long,\n title = \"Long Text Generation with Topic-aware Discrete Latent Variable Model\",\n author = \"Yang, Erguang and\n Liu, Mingtong and\n Xiong, Deyi and\n Zhang, Yujie and\n Chen, Yufeng and\n Xu, Jinan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.554/\",\n doi = \"10.18653/v1/2022.emnlp-main.554\",\n pages = \"8100--8107\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.554.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.554/", + "pdf_size": 347651, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10252038374604102493&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China+Beijing Lanzhou Technology Co., Ltd., Beijing, China; Beijing Lanzhou Technology Co., Ltd., Beijing, China; College of Intelligence and Computing, Tianjin University, Tianjin, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China; School of Computer and Information Technology, Beijing Jiaotong University, Beijing, China", + "aff_domain": "bjtu.edu.cn;langboat.com;tju.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + 
"email": "bjtu.edu.cn;langboat.com;tju.edu.cn;bjtu.edu.cn;bjtu.edu.cn;bjtu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;2;0;0;0", + "aff_unique_norm": "Beijing Jiaotong University;Beijing Lanzhou Technology Co., Ltd.;Tianjin University", + "aff_unique_dep": "School of Computer and Information Technology;;College of Intelligence and Computing", + "aff_unique_url": "http://www.bjtu.edu.cn;;http://www.tju.edu.cn", + "aff_unique_abbr": "BJTU;;Tianjin University", + "aff_campus_unique_index": "0;2;0;0;0", + "aff_campus_unique": "Beijing;;Tianjin", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.145", + "title": "Long Text and Multi-Table Summarization: Dataset and Method", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic document summarization aims to produce a concise summary covering the input document\u2019s salient information. Within a report document, the salient information can be scattered in the textual and non-textual content. However, existing document summarization datasets and methods usually focus on the text and filter out the non-textual content. Missing tabular data can limit produced summaries\u2019 informativeness, especially when summaries require covering quantitative descriptions of critical metrics in tables. Existing datasets and methods cannot meet the requirements of summarizing long text and multiple tables in each report. To deal with the scarcity of available data, we propose FINDSum, the first large-scale dataset for long text and multi-table summarization. Built on 21,125 annual reports from 3,794 companies, it has two subsets for summarizing each company\u2019s results of operations and liquidity. To summarize the long text and dozens of tables in each report, we present three types of summarization methods. 
Besides, we propose a set of evaluation metrics to assess the usage of numerical information in produced summaries. Dataset analyses and experimental results indicate the importance of jointly considering input textual and tabular data when summarizing report documents.", + "author": "Shuaiqi Liu; Jiannong Cao; Ruosong Yang; Zhiyuan Wen", + "authorids": "/s/shuaiqi-liu/; /j/jiannong-cao/; /r/ruosong-yang/; /z/zhiyuan-wen/", + "bibtex": "@inproceedings{liu-etal-2022-long,\n title = \"Long Text and Multi-Table Summarization: Dataset and Method\",\n author = \"Liu, Shuaiqi and\n Cao, Jiannong and\n Yang, Ruosong and\n Wen, Zhiyuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.145/\",\n doi = \"10.18653/v1/2022.findings-emnlp.145\",\n pages = \"1995--2010\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.145.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.145/", + "pdf_size": 643560, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5583282639240157973&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "The Hong Kong Polytechnic University; The Hong Kong Polytechnic University; The Hong Kong Polytechnic University; The Hong Kong Polytechnic University", + "aff_domain": "comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk", + "email": "comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk;comp.polyu.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The Hong Kong Polytechnic University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.polyu.edu.hk", + "aff_unique_abbr": "PolyU", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.725", + "title": "Looking at the Overlooked: An Analysis on the Word-Overlap Bias in Natural Language Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "It has been shown that NLI models are usually biased with respect to the word-overlap between the premise and the hypothesis, as they take this feature as a primary cue for predicting the entailment label. In this paper, we focus on an overlooked aspect of the overlap bias in the NLI models: the reverse word-overlap bias. Our experimental results demonstrate that current NLI systems are also highly biased towards the non-entailment label on instances with low overlap and that existing debiasing methods, which are reportedly successful on challenge datasets, are generally ineffective in addressing this category of bias.Through a set of analyses, we investigate the reasons for the emergence of the overlap bias and the role of minority examples in mitigating this bias.For the former, we find that the word overlap bias does not stem from pre-training, and in the latter, we observe that in contrast to the accepted assumption, eliminating minority examples does not affect the generalizability of debiasing methods with respect to the overlap bias.", + "author": "Sara Rajaee; Yadollah Yaghoobzadeh; Mohammad Taher Pilehvar", + "authorids": "/s/sara-rajaee/; /y/yadollah-yaghoobzadeh/; /m/mohammad-taher-pilehvar/", + "bibtex": "@inproceedings{rajaee-etal-2022-looking,\n title = \"Looking at the Overlooked: An Analysis on the Word-Overlap Bias in Natural Language Inference\",\n author = \"Rajaee, Sara and\n Yaghoobzadeh, Yadollah and\n Pilehvar, Mohammad Taher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language 
Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.725/\",\n doi = \"10.18653/v1/2022.emnlp-main.725\",\n pages = \"10605--10616\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.725.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.725/", + "pdf_size": 1086581, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14508813526586795084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Amsterdam, Netherlands; University of Tehran, Iran; Tehran Institute for Advanced Studies + Khatam University, Iran", + "aff_domain": "uva.nl;ut.ac.ir;cam.ac.uk", + "email": "uva.nl;ut.ac.ir;cam.ac.uk", + "github": "https://github.com/sara-rajaee/reverse_bias", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2+3", + "aff_unique_norm": "University of Amsterdam;University of Tehran;Tehran Institute for Advanced Studies;Khatam University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uva.nl;https://ut.ac.ir;http://www.tias.ir;http://www.kut.ac.ir", + "aff_unique_abbr": "UvA;UT;;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1+1", + "aff_country_unique": "Netherlands;Iran" + }, + { + "id": "2022.findings-emnlp.235", + "title": "Low-resource Interactive Active Labeling for Fine-tuning Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, active learning (AL) methods have been used to effectively fine-tune pre-trained language models for various NLP tasks such as sentiment analysis and document classification. 
However, given the task of fine-tuning language models, understanding the impact of different aspects on AL methods such as labeling cost, sample acquisition latency, and the diversity of the datasets necessitates a deeper investigation. This paper examines the performance of existing AL methods within a low-resource, interactive labeling setting. We observe that existing methods often underperform in such a setting while exhibiting higher latency and a lack of generalizability. To overcome these challenges, we propose a novel active learning method TYROUGE that employs a hybrid sampling strategy to minimize labeling cost and acquisition latency while providing a framework for adapting to dataset diversity via user guidance. Through our experiments, we observe that compared to SOTA methods, TYROUGE reduces the labeling cost by up to 43% and the acquisition latency by as much as 11X, while achieving comparable accuracy. Finally, we discuss the strengths and weaknesses of TYROUGE by exploring the impact of dataset characteristics.", + "author": "Seiji Maekawa; Dan Zhang; Hannah Kim; Sajjadur Rahman; Estevam Hruschka", + "authorids": "/s/seiji-maekawa/; /d/dan-zhang/; /h/hannah-kim/; /s/sajjadur-rahman/; /e/estevam-hruschka/", + "bibtex": "@inproceedings{maekawa-etal-2022-low,\n title = \"Low-resource Interactive Active Labeling for Fine-tuning Language Models\",\n author = \"Maekawa, Seiji and\n Zhang, Dan and\n Kim, Hannah and\n Rahman, Sajjadur and\n Hruschka, Estevam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.235/\",\n doi = \"10.18653/v1/2022.findings-emnlp.235\",\n pages = \"3230--3242\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.235.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.235/", + "pdf_size": 4574821, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4048624919797962688&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Osaka University; Megagon Labs; Megagon Labs; Megagon Labs; Megagon Labs", + "aff_domain": "ist.osaka-u.ac.jp;megagon.ai;megagon.ai;megagon.ai;megagon.ai", + "email": "ist.osaka-u.ac.jp;megagon.ai;megagon.ai;megagon.ai;megagon.ai", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Osaka University;Megagon Labs", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.osaka-u.ac.jp;https://www.megagonlabs.com", + "aff_unique_abbr": "Osaka U;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-main.689", + "title": "Low-resource Neural Machine Translation with Cross-modal Alignment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "How to achieve neural machine translation with limited parallel data? Existing techniques often rely on large-scale monolingual corpus, which is impractical for some low-resource languages. In this paper, we turn to connect several low-resource languages to a particular high-resource one by additional visual modality. Specifically, we propose a cross-modal contrastive learning method to learn a shared space for all languages, where both a coarse-grained sentence-level objective and a fine-grained token-level one are introduced. 
Experimental results and further analysis show that our method can effectively learn the cross-modal and cross-lingual alignment with a small amount of image-text pairs, and achieves significant improvements over the text-only baseline under both zero-shot and few-shot scenarios.", + "author": "Zhe Yang; Qingkai Fang; Yang Feng", + "authorids": "/z/zhe-yang/; /q/qingkai-fang/; /y/yang-feng/", + "bibtex": "@inproceedings{yang-etal-2022-low,\n title = \"Low-resource Neural Machine Translation with Cross-modal Alignment\",\n author = \"Yang, Zhe and\n Fang, Qingkai and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.689/\",\n doi = \"10.18653/v1/2022.emnlp-main.689\",\n pages = \"10134--10146\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.689.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.689/", + "pdf_size": 2168845, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4992309280392879719&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 4, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": 
"ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "https://github.com/ictnlp/LNMT-CA", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.63", + "title": "M2D2: A Massively Multi-Domain Language Modeling Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present M2D2, a fine-grained, massively multi-domain corpus for studying domain adaptation in language models (LMs). M2D2 consists of 8.5B tokens and spans 145 domains extracted from Wikipedia and Semantic Scholar. Using ontologies derived from Wikipedia and ArXiv categories, we organize the domains in each data source into 22 groups. This two-level hierarchy enables the study of relationships between domains and their effects on in- and out-of-domain performance after adaptation. We also present a number of insights into the nature of effective domain adaptation in LMs, as examples of the new types of studies M2D2 enables. To improve in-domain performance, we show the benefits of adapting the LM along a domain hierarchy; adapting to smaller amounts of fine-grained domain-specific data can lead to larger in-domain performance gains than larger amounts of weakly relevant data. 
We further demonstrate a trade-off between in-domain specialization and out-of-domain generalization within and across ontologies, as well as a strong correlation between out-of-domain performance and lexical overlap between domains.", + "author": "Machel Reid; Victor Zhong; Suchin Gururangan; Luke Zettlemoyer", + "authorids": "/m/machel-reid/; /v/victor-zhong/; /s/suchin-gururangan/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{reid-etal-2022-m2d2,\n title = \"{M}2{D}2: A Massively Multi-Domain Language Modeling Dataset\",\n author = \"Reid, Machel and\n Zhong, Victor and\n Gururangan, Suchin and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.63/\",\n doi = \"10.18653/v1/2022.emnlp-main.63\",\n pages = \"964--975\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.63.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.63/", + "pdf_size": 774124, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12896836986126078636&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "The University of Tokyo; University of Washington; University of Washington; University of Washington", + "aff_domain": "google.com;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "email": "google.com;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "github": "https://github.com/machelreid/m2d2", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "University of Tokyo;University of Washington", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.washington.edu", + "aff_unique_abbr": "UTokyo;UW", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-main.94", + "title": "M3: A Multi-View Fusion and Multi-Decoding Network for Multi-Document Reading Comprehension", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-document reading comprehension task requires collecting evidences from different documents for answering questions. Previous research works either use the extractive modeling method to naively integrate the scores from different documents on the encoder side or use the generative modeling method to collect the clues from different documents on the decoder side individually. However, any single modeling method cannot make full of the advantages of both. In this work, we propose a novel method that tries to employ a multi-view fusion and multi-decoding mechanism to achieve it. For one thing, our approach leverages question-centered fusion mechanism and cross-attention mechanism to gather fine-grained fusion of evidence clues from different documents in the encoder and decoder concurrently. For another, our method simultaneously employs both the extractive decoding approach and the generative decoding method to effectively guide the training process. Compared with existing methods, our method can perform both extractive decoding and generative decoding independently and optionally. 
Our experiments on two mainstream multi-document reading comprehension datasets (Natural Questions and TriviaQA) demonstrate that our method can provide consistent improvements over previous state-of-the-art methods.", + "author": "Liang Wen; Houfeng Wang; Yingwei Luo; Xiaolin Wang", + "authorids": "/l/liang-wen/; /h/houfeng-wang/; /y/yingwei-luo/; /x/xiaolin-wang/", + "bibtex": "@inproceedings{wen-etal-2022-m3,\n title = \"{M}3: A Multi-View Fusion and Multi-Decoding Network for Multi-Document Reading Comprehension\",\n author = \"Wen, Liang and\n Wang, Houfeng and\n Luo, Yingwei and\n Wang, Xiaolin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.94/\",\n doi = \"10.18653/v1/2022.emnlp-main.94\",\n pages = \"1450--1461\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.94.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.94/", + "pdf_size": 681634, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9458551261519145393&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science, Peking University, China+Peng Cheng Laboratory, Shenzhen, China; School of Computer Science, Peking University, China+Peng Cheng Laboratory, Shenzhen, China; School of Computer Science, Peking University, China+Peng Cheng Laboratory, Shenzhen, China; School of Computer Science, Peking University, China+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;0+1", + "aff_unique_norm": "Peking 
University;Peng Cheng Laboratory", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "Peking U;", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.286", + "title": "M3: Multi-level dataset for Multi-document summarisation of Medical studies", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present M3 (Multi-level dataset for Multi-document summarisation of Medical studies), a benchmark dataset for evaluating the quality of summarisation systems in the biomedical domain. The dataset contains sets of multiple input documents and target summaries of three levels of complexity: documents, sentences, and propositions. The dataset also includes several levels of annotation, including biomedical entities, direction, and strength of relations between them, and the discourse relationships between the input documents (\u201ccontradiction\u201d or \u201cagreement\u201d). 
We showcase usage scenarios of the dataset by testing 10 generic and domain-specific summarisation models in a zero-shot setting, and introduce a probing task based on counterfactuals to test if models are aware of the direction and strength of the conclusions generated from input studies.", + "author": "Yulia Otmakhova; Karin Verspoor; Timothy Baldwin; Antonio Jimeno Yepes; Jey Han Lau", + "authorids": "/j/julia-otmakhova/; /k/karin-verspoor/; /t/timothy-baldwin/; /a/antonio-jimeno-yepes/; /j/jey-han-lau/", + "bibtex": "@inproceedings{otmakhova-etal-2022-m3,\n title = \"{M}3: Multi-level dataset for Multi-document summarisation of Medical studies\",\n author = \"Otmakhova, Yulia and\n Verspoor, Karin and\n Baldwin, Timothy and\n Jimeno Yepes, Antonio and\n Lau, Jey Han\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.286/\",\n doi = \"10.18653/v1/2022.findings-emnlp.286\",\n pages = \"3887--3901\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.286.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.286/", + "pdf_size": 920189, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18047835453592068136&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff": "The University of Melbourne; RMIT University+The University of Melbourne; The University of Melbourne+MBZUAI; RMIT University; The University of Melbourne", + "aff_domain": "student.unimelb.edu.au;rmit.edu.au;ldwin.net;rmit.edu.au;gmail.com", + "email": "student.unimelb.edu.au;rmit.edu.au;ldwin.net;rmit.edu.au;gmail.com", + "github": "https://github.com/julia-nixie/m3", + "project": "", + "author_num": 5, + "aff_unique_index": 
"0;1+0;0+2;1;0", + "aff_unique_norm": "University of Melbourne;RMIT University;Mohamed Bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.unimelb.edu.au;https://www.rmit.edu.au;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "UniMelb;RMIT;MBZUAI", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0+1;0;0", + "aff_country_unique": "Australia;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.657", + "title": "MABEL: Attenuating Gender Bias using Textual Entailment Data", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models encode undesirable social biases, which are further exacerbated in downstream use. To this end, we propose MABEL (a Method for Attenuating Gender Bias using Entailment Labels), an intermediate pre-training approach for mitigating gender bias in contextualized representations. Key to our approach is the use of a contrastive learning objective on counterfactually augmented, gender-balanced entailment pairs from natural language inference (NLI) datasets. We also introduce an alignment regularizer that pulls identical entailment pairs along opposite gender directions closer. We extensively evaluate our approach on intrinsic and extrinsic metrics, and show that MABEL outperforms previous task-agnostic debiasing approaches in terms of fairness. It also preserves task performance after fine-tuning on downstream tasks. Together, these findings demonstrate the suitability of NLI data as an effective means of bias mitigation, as opposed to only using unlabeled sentences in the literature. Finally, we identify that existing approaches often use evaluation settings that are insufficient or inconsistent. 
We make an effort to reproduce and compare previous methods, and call for unifying the evaluation settings across gender debiasing methods for better future comparison.", + "author": "Jacqueline He; Mengzhou Xia; Christiane Fellbaum; Danqi Chen", + "authorids": "/j/jacqueline-he/; /m/mengzhou-xia/; /c/christiane-fellbaum/; /d/danqi-chen/", + "bibtex": "@inproceedings{he-etal-2022-mabel,\n title = \"{MABEL}: Attenuating Gender Bias using Textual Entailment Data\",\n author = \"He, Jacqueline and\n Xia, Mengzhou and\n Fellbaum, Christiane and\n Chen, Danqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.657/\",\n doi = \"10.18653/v1/2022.emnlp-main.657\",\n pages = \"9681--9702\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.657.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.657/", + "pdf_size": 1240861, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16974950700626145191&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, Princeton University; Department of Computer Science, Princeton University; Department of Computer Science, Princeton University; Department of Computer Science, Princeton University", + "aff_domain": "gmail.com;cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "email": "gmail.com;cs.princeton.edu;cs.princeton.edu;cs.princeton.edu", + "github": "https://github.com/princeton-nlp/MABEL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Princeton University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.princeton.edu", + 
"aff_unique_abbr": "Princeton", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.179", + "title": "MAGMA \u2013 Multimodal Augmentation of Generative Models through Adapter-based Finetuning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large-scale pretraining is fast becoming the norm in Vision-Language (VL) modeling. However, prevailing VL approaches are limited by the requirement for labeled data and the use of complex multi-step pretraining objectives. We present MAGMA - a simple method for augmenting generative language models with additional modalities using adapter-based finetuning. Building on Frozen, we train a series of VL models that autoregressively generate text from arbitrary combinations of visual and textual input. The pretraining is entirely end-to-end using a single language modeling objective, simplifying optimization compared to previous approaches. Importantly, the language model weights remain unchanged during training, allowing for transfer of encyclopedic knowledge and in-context learning abilities from language pretraining. 
MAGMA outperforms Frozen on open-ended generative tasks, achieving state of the art results on the OKVQA benchmark and competitive results on a range of other popular VL benchmarks, while pretraining on 0.2 % of the number of samples used to train SimVLM.", + "author": "Constantin Eichenberg; Sidney Black; Samuel Weinbach; Letitia Parcalabescu; Anette Frank", + "authorids": "/c/constantin-eichenberg/; /s/sidney-black/; /s/samuel-weinbach/; /l/letitia-parcalabescu/; /a/anette-frank/", + "bibtex": "@inproceedings{eichenberg-etal-2022-magma,\n title = \"{MAGMA} {--} Multimodal Augmentation of Generative Models through Adapter-based Finetuning\",\n author = \"Eichenberg, Constantin and\n Black, Sidney and\n Weinbach, Samuel and\n Parcalabescu, Letitia and\n Frank, Anette\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.179/\",\n doi = \"10.18653/v1/2022.findings-emnlp.179\",\n pages = \"2416--2428\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.179.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.179/", + "pdf_size": 12504980, + "gs_citation": 116, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7157296873546480412&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Aleph Alpha; Aleph Alpha; Aleph Alpha + Heidelberg University; Heidelberg University; Heidelberg University", + "aff_domain": "aleph-alpha.com;gmail.com;aleph-alpha.com;cl.uni-heidelberg.de;cl.uni-heidelberg.de", + "email": "aleph-alpha.com;gmail.com;aleph-alpha.com;cl.uni-heidelberg.de;cl.uni-heidelberg.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;1;1", + "aff_unique_norm": "Aleph Alpha;Heidelberg 
University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.aleph-alpha.com;https://www.uni-heidelberg.de", + "aff_unique_abbr": ";Uni Heidelberg", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.207", + "title": "MANTa: Efficient Gradient-Based Tokenization for End-to-End Robust Language Modeling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Static subword tokenization algorithms have been an essential component of recent works on language modeling. However, their static nature results in important flaws that degrade the models\u2019 downstream performance and robustness. In this work, we propose MANTa, a Module for Adaptive Neural TokenizAtion. MANTa is a differentiable tokenizer trained end-to-end with the language model. The resulting system offers a trade-off between the expressiveness of byte-level models and the speed of models trained using subword tokenization. In addition, our tokenizer is highly explainable since it produces an explicit segmentation of sequences into blocks. We evaluate our pre-trained model on several English datasets from different domains as well as on synthetic noise. We find that MANTa improves robustness to character perturbations and out-of-domain data. We then show that MANTa performs comparably to other models on the general-domain GLUE benchmark. 
Finally, we show that it is considerably faster than strictly byte-level models.", + "author": "Nathan Godey; Roman Castagn\u00e9; \u00c9ric de la Clergerie; Beno\u00eet Sagot", + "authorids": "/n/nathan-godey/; /r/roman-castagne/; /e/eric-villemonte-de-la-clergerie/; /b/benoit-sagot/", + "bibtex": "@inproceedings{godey-etal-2022-manta,\n title = \"{MANT}a: Efficient Gradient-Based Tokenization for End-to-End Robust Language Modeling\",\n author = \"Godey, Nathan and\n Castagn{\\'e}, Roman and\n de la Clergerie, {\\'E}ric and\n Sagot, Beno{\\^i}t\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.207/\",\n doi = \"10.18653/v1/2022.findings-emnlp.207\",\n pages = \"2859--2870\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.207.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.207/", + "pdf_size": 1777457, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15557247200251009703&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Inria, Paris, France + Sorbonne Universit\u00e9, Paris, France; Inria, Paris, France + Sorbonne Universit\u00e9, Paris, France; Inria, Paris, France; Inria, Paris, France", + "aff_domain": "inria.fr;inria.fr;inria.fr;inria.fr", + "email": "inria.fr;inria.fr;inria.fr;inria.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;0", + "aff_unique_norm": "Inria;Sorbonne Universit\u00e9", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.inria.fr;https://www.sorbonne-universite.fr", + "aff_unique_abbr": "Inria;Sorbonne U", + "aff_campus_unique_index": "0+0;0+0;0;0", + "aff_campus_unique": "Paris", + "aff_country_unique_index": 
"0+0;0+0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.emnlp-main.60", + "title": "MAVEN-ERE: A Unified Large-scale Dataset for Event Coreference, Temporal, Causal, and Subevent Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The diverse relationships among real-world events, including coreference, temporal, causal, and subevent relations, are fundamental to understanding natural languages. However, two drawbacks of existing datasets limit event relation extraction (ERE) tasks: (1) Small scale. Due to the annotation complexity, the data scale of existing datasets is limited, which cannot well train and evaluate data-hungry models. (2) Absence of unified annotation. Different types of event relations naturally interact with each other, but existing datasets only cover limited relation types at once, which prevents models from taking full advantage of relation interactions. To address these issues, we construct a unified large-scale human-annotated ERE dataset MAVEN-ERE with improved annotation schemes. It contains 103,193 event coreference chains, 1,216,217 temporal relations, 57,992 causal relations, and 15,841 subevent relations, which is larger than existing datasets of all the ERE tasks by at least an order of magnitude. Experiments show that ERE on MAVEN-ERE is quite challenging, and considering relation interactions with joint learning can improve performances. 
The dataset and source codes can be obtained from https://github.com/THU-KEG/MAVEN-ERE.", + "author": "Xiaozhi Wang; Yulin Chen; Ning Ding; Hao Peng; Zimu Wang; Yankai Lin; Xu Han; Lei Hou; Juanzi Li; Zhiyuan Liu; Peng Li; Jie Zhou", + "authorids": "/x/xiaozhi-wang/; /y/yulin-chen/; /n/ning-ding/; /h/hao-peng/; /z/zimu-wang/; /y/yankai-lin/; /x/xu-han/; /l/lei-hou/; /j/juanzi-li/; /z/zhiyuan-liu/; /p/peng-li/; /j/jie-zhou/", + "bibtex": "@inproceedings{wang-etal-2022-maven,\n title = \"{MAVEN}-{ERE}: A Unified Large-scale Dataset for Event Coreference, Temporal, Causal, and Subevent Relation Extraction\",\n author = \"Wang, Xiaozhi and\n Chen, Yulin and\n Ding, Ning and\n Peng, Hao and\n Wang, Zimu and\n Lin, Yankai and\n Han, Xu and\n Hou, Lei and\n Li, Juanzi and\n Liu, Zhiyuan and\n Li, Peng and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.60/\",\n doi = \"10.18653/v1/2022.emnlp-main.60\",\n pages = \"926--941\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.60.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.60/", + "pdf_size": 424239, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10560551386203403936&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": ";;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;", + "email": ";;;;;;;;;;;", + "github": "https://github.com/THU-KEG/MAVEN-ERE", + "project": "", + "author_num": 12 + }, + { + "id": "2022.findings-emnlp.500", + "title": "MBTI Personality Prediction for Fictional Characters Using Movie Scripts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "An NLP model that understands stories should 
be able to understand the characters in them. To support the development of neural models for this purpose, we construct a benchmark, Story2Personality. The task is to predict a movie character\u2019s MBTI or Big 5 personality types based on the narratives of the character. Experiments show that our task is challenging for the existing text classification models, as none is able to largely outperform random guesses. We further proposed a multi-view model for personality prediction using both verbal and non-verbal descriptions, which gives improvement compared to using only verbal descriptions. The uniqueness and challenges in our dataset call for the development of narrative comprehension techniques from the perspective of understanding characters.", + "author": "Yisi Sang; Xiangyang Mou; Mo Yu; Dakuo Wang; Jing Li; Jeffrey Stanton", + "authorids": "/y/yisi-sang/; /x/xiangyang-mou/; /m/mo-yu/; /d/dakuo-wang/; /j/jing-li/; /j/jeffrey-stanton/", + "bibtex": "@inproceedings{sang-etal-2022-mbti,\n title = \"{MBTI} Personality Prediction for Fictional Characters Using Movie Scripts\",\n author = \"Sang, Yisi and\n Mou, Xiangyang and\n Yu, Mo and\n Wang, Dakuo and\n Li, Jing and\n Stanton, Jeffrey\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.500/\",\n doi = \"10.18653/v1/2022.findings-emnlp.500\",\n pages = \"6715--6724\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.500.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.500/", + "pdf_size": 1678816, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10900039685090707488&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff": "Syracuse 
University; Rensselaer Polytechnic Institute; Pattern Recognition Center, WeChat AI; IBM Research, Northeastern University; New Jersey Institute of Technology; Syracuse University", + "aff_domain": "syr.edu;rpi.edu;tencent.com; ; ; ", + "email": "syr.edu;rpi.edu;tencent.com; ; ; ", + "github": "https://github.com/YisiSang/Story2Personality", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;0", + "aff_unique_norm": "Syracuse University;Rensselaer Polytechnic Institute;WeChat AI;IBM Research;New Jersey Institute of Technology", + "aff_unique_dep": ";;Pattern Recognition Center;;", + "aff_unique_url": "https://www.syracuse.edu;https://www.rpi.edu;https://www.wechat.com;https://www.ibm.com/research;https://www.njit.edu", + "aff_unique_abbr": "Syracuse;RPI;WeChat AI;IBM;NJIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.73", + "title": "MCP: Self-supervised Pre-training for Personalized Chatbots with Multi-level Contrastive Sampling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Personalized chatbots focus on endowing the chatbots with a consistent personality to behave like real users and further act as personal assistants. Previous studies have explored generating implicit user profiles from the user\u2019s dialogue history for building personalized chatbots. However, these studies only use the response generation loss to train the entire model, thus it is prone to suffer from the problem of data sparsity. Besides, they overemphasize the final generated response\u2019s quality while ignoring the correlations and fusions between the user\u2019s dialogue history, leading to rough data representations and performance degradation. 
To tackle these problems, we propose a self-supervised learning framework MCP for capturing better representations from users\u2019 dialogue history for personalized chatbots. Specifically, we apply contrastive sampling methods to leverage the supervised signals hidden in user dialog history, and generate the pre-training samples for enhancing the model. We design three pre-training tasks based on three types of contrastive pairs from user dialogue history, namely response pairs, sequence augmentation pairs, and user pairs. We pre-train the utterance encoder and the history encoder towards the contrastive objectives and use these pre-trained encoders for generating user profiles while personalized response generation. Experimental results on two real-world datasets show a significant improvement in our proposed model MCP compared with the existing methods.", + "author": "Zhaoheng Huang; Zhicheng Dou; Yutao Zhu; Zhengyi Ma", + "authorids": "/z/zhaoheng-huang/; /z/zhicheng-dou/; /y/yutao-zhu/; /z/zhengyi-ma/", + "bibtex": "@inproceedings{huang-etal-2022-mcp,\n title = \"{MCP}: Self-supervised Pre-training for Personalized Chatbots with Multi-level Contrastive Sampling\",\n author = \"Huang, Zhaoheng and\n Dou, Zhicheng and\n Zhu, Yutao and\n Ma, Zhengyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.73/\",\n doi = \"10.18653/v1/2022.findings-emnlp.73\",\n pages = \"1030--1042\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.73.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.73/", + "pdf_size": 973742, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=1289776830711864462&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Gaoling School of Artificial Intelligence, Renmin University of China; Gaoling School of Artificial Intelligence, Renmin University of China; University of Montreal; Gaoling School of Artificial Intelligence, Renmin University of China", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;umontreal.ca;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;umontreal.ca;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Renmin University of China;University of Montreal", + "aff_unique_dep": "Gaoling School of Artificial Intelligence;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.umontreal.ca", + "aff_unique_abbr": "RUC;UM", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.439", + "title": "MCPG: A Flexible Multi-Level Controllable Framework for Unsupervised Paraphrase Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present MCPG: a simple and effective approach for controllable unsupervised paraphrase generation, which is also flexible to adapt to specific domains without extra training. MCPG is controllable in different levels: local lexicons, global semantics, and universal styles. The unsupervised paradigm of MCPG combines factual keywords and diversified semantic embeddings as local lexical and global semantic constraints. The semantic embeddings are diversified by standard dropout, which is exploited for the first time to increase inference diversity by us. Moreover, MCPG is qualified with good domain adaptability by adding a transfer vector as a universal style constraint, which is refined from the exemplars retrieved from the corpus of the target domain in a training-free way. 
Extensive experiments show that MCPG outperforms state-of-the-art unsupervised baselines by a margin. Meanwhile, our domain-adapted MCPG also achieves competitive performance with strong supervised baselines even without training.", + "author": "Yi Chen; Haiyun Jiang; Lemao Liu; Rui Wang; Shuming Shi; Ruifeng Xu", + "authorids": "/y/yi-chen/; /h/haiyun-jiang/; /l/lemao-liu/; /r/rui-wang/; /s/shuming-shi/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{chen-etal-2022-mcpg,\n title = \"{MCPG}: A Flexible Multi-Level Controllable Framework for Unsupervised Paraphrase Generation\",\n author = \"Chen, Yi and\n Jiang, Haiyun and\n Liu, Lemao and\n Wang, Rui and\n Shi, Shuming and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.439/\",\n doi = \"10.18653/v1/2022.findings-emnlp.439\",\n pages = \"5948--5958\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.439.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.439/", + "pdf_size": 632134, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9293296576018781043&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China; ; ; Harbin Institute of Technology, 
Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "gmail.com;outlook.com;hit.edu.cn; ; ; ", + "email": "gmail.com;outlook.com;hit.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Peng Cheng Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;", + "aff_unique_url": "http://en.hit.edu.cn/;;", + "aff_unique_abbr": "HIT;;", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.652", + "title": "MEE: A Novel Multilingual Event Extraction Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Event Extraction (EE) is one of the fundamental tasks in Information Extraction (IE) that aims to recognize event mentions and their arguments (i.e., participants) from text. Due to its importance, extensive methods and resources have been developed for Event Extraction. However, one limitation of current research for EE involves the under-exploration for non-English languages in which the lack of high-quality multilingual EE datasets for model training and evaluation has been the main hindrance. To address this limitation, we propose a novel Multilingual Event Extraction dataset (MEE) that provides annotation for more than 50K event mentions in 8 typologically different languages. MEE comprehensively annotates data for entity mentions, event triggers and event arguments. We conduct extensive experiments on the proposed dataset to reveal challenges and opportunities for multilingual EE. 
To foster future research in this direction, our dataset will be publicly available.", + "author": "Amir Pouran Ben Veyseh; Javid Ebrahimi; Franck Dernoncourt; Thien Nguyen", + "authorids": "/a/amir-pouran-ben-veyseh/; /j/javid-ebrahimi/; /f/franck-dernoncourt/; /t/thien-nguyen/", + "bibtex": "@inproceedings{pouran-ben-veyseh-etal-2022-mee,\n title = \"{MEE}: A Novel Multilingual Event Extraction Dataset\",\n author = \"Pouran Ben Veyseh, Amir and\n Ebrahimi, Javid and\n Dernoncourt, Franck and\n Nguyen, Thien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.652/\",\n doi = \"10.18653/v1/2022.emnlp-main.652\",\n pages = \"9603--9613\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.652.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.652/", + "pdf_size": 243020, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18260804563210566103&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, University of Oregon, Eugene, OR, USA; Department of Computer Science, University of Oregon, Eugene, OR, USA; Adobe Research, Seattle, WA, USA; Department of Computer Science, University of Oregon, Eugene, OR, USA", + "aff_domain": "cs.uoregon.edu;gmail.com;adobe.com;cs.uoregon.edu", + "email": "cs.uoregon.edu;gmail.com;adobe.com;cs.uoregon.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Oregon;Adobe Research", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.uoregon.edu;https://research.adobe.com", + "aff_unique_abbr": "UO;Adobe", + 
"aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Eugene;Seattle", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.449", + "title": "META-GUI: Towards Multi-modal Conversational Agents on Mobile GUI", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Task-oriented dialogue (TOD) systems have been widely used by mobile phone intelligent assistants to accomplish tasks such as calendar scheduling or hotel reservation. Current TOD systems usually focus on multi-turn text/speech interaction, then they would call back-end APIs designed for TODs to perform the task. However, this API-based architecture greatly limits the information-searching capability of intelligent assistants and may even lead to task failure if TOD-specific APIs are not available or the task is too complicated to be executed by the provided APIs. In this paper, we propose a new TOD architecture: GUI-based task-oriented dialogue system (GUI-TOD). A GUI-TOD system can directly perform GUI operations on real APPs and execute tasks without invoking TOD-specific backend APIs. Furthermore, we release META-GUI, a dataset for training a Multi-modal convErsaTional Agent on mobile GUI. We also propose a multi-model action prediction and response model, which show promising results on META-GUI. 
The dataset, codes and leaderboard are publicly available.", + "author": "Liangtai Sun; Xingyu Chen; Lu Chen; Tianle Dai; Zichen Zhu; Kai Yu", + "authorids": "/l/liangtai-sun/; /x/xingyu-chen/; /l/lu-chen/; /t/tianle-dai/; /z/zichen-zhu/; /k/kai-yu/", + "bibtex": "@inproceedings{sun-etal-2022-meta,\n title = \"{META}-{GUI}: Towards Multi-modal Conversational Agents on Mobile {GUI}\",\n author = \"Sun, Liangtai and\n Chen, Xingyu and\n Chen, Lu and\n Dai, Tianle and\n Zhu, Zichen and\n Yu, Kai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.449/\",\n doi = \"10.18653/v1/2022.emnlp-main.449\",\n pages = \"6699--6712\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.449.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.449/", + "pdf_size": 8337443, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12517716836224024052&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "X-LANCE Lab, Department of Computer Science and Engineering; X-LANCE Lab, Department of Computer Science and Engineering; X-LANCE Lab, Department of Computer Science and Engineering + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; X-LANCE Lab, Department of Computer Science and Engineering; X-LANCE Lab, Department of Computer Science and Engineering; X-LANCE Lab, Department of Computer Science and Engineering + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn", + 
"github": "", + "project": "https://x-lance.github.io/META-GUI-Leaderboard/", + "author_num": 6, + "aff_unique_index": "0;0;0+1;0;0;0+1", + "aff_unique_norm": "X-LANCE Lab;Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering;AI Institute", + "aff_unique_url": ";https://www.sjtu.edu.cn", + "aff_unique_abbr": ";SJTU", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "1;1", + "aff_country_unique": ";China" + }, + { + "id": "2022.emnlp-main.265", + "title": "MGDoc: Pre-training with Multi-granular Hierarchy for Document Image Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Document images are a ubiquitous source of data where the text is organized in a complex hierarchical structure ranging from fine granularity (e.g., words), medium granularity (e.g., regions such as paragraphs or figures), to coarse granularity (e.g., the whole page). The spatial hierarchical relationships between content at different levels of granularity are crucial for document image understanding tasks. Existing methods learn features from either word-level or region-level but fail to consider both simultaneously. Word-level models are restricted by the fact that they originate from pure-text language models, which only encode the word-level context. In contrast, region-level models attempt to encode regions corresponding to paragraphs or text blocks into a single embedding, but they perform worse with additional word-level features. To deal with these issues, we propose MGDoc, a new multi-modal multi-granular pre-training framework that encodes page-level, region-level, and word-level information at the same time. MGDoc uses a unified text-visual encoder to obtain multi-modal features across different granularities, which makes it possible to project the multi-granular features into the same hyperspace. 
To model the region-word correlation, we design a cross-granular attention mechanism and specific pre-training tasks for our model to reinforce the model of learning the hierarchy between regions and words. Experiments demonstrate that our proposed model can learn better features that perform well across granularities and lead to improvements in downstream tasks.", + "author": "Zilong Wang; Jiuxiang Gu; Chris Tensmeyer; Nikolaos Barmpalios; Ani Nenkova; Tong Sun; Jingbo Shang; Vlad Morariu", + "authorids": "/z/zilong-wang/; /j/jiuxiang-gu/; /c/chris-tensmeyer/; /n/nikolaos-barmpalios/; /a/ani-nenkova/; /t/tong-sun/; /j/jingbo-shang/; /v/vlad-morariu/", + "bibtex": "@inproceedings{wang-etal-2022-mgdoc,\n title = \"{MGD}oc: Pre-training with Multi-granular Hierarchy for Document Image Understanding\",\n author = \"Wang, Zilong and\n Gu, Jiuxiang and\n Tensmeyer, Chris and\n Barmpalios, Nikolaos and\n Nenkova, Ani and\n Sun, Tong and\n Shang, Jingbo and\n Morariu, Vlad\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.265/\",\n doi = \"10.18653/v1/2022.emnlp-main.265\",\n pages = \"3984--3993\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.265.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.265/", + "pdf_size": 1983017, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14092184883269549909&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of California, San Diego+Adobe Research; Adobe Research; Adobe Research; Adobe Research; Adobe Research; Adobe Research; University of California, San Diego+Adobe Research; Adobe Research", + "aff_domain": 
"ucsd.edu;adobe.com;adobe.com;adobe.com;adobe.com;adobe.com;ucsd.edu;adobe.com", + "email": "ucsd.edu;adobe.com;adobe.com;adobe.com;adobe.com;adobe.com;ucsd.edu;adobe.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;1;1;1;1;0+1;1", + "aff_unique_norm": "University of California, San Diego;Adobe", + "aff_unique_dep": ";Adobe Research", + "aff_unique_url": "https://www.ucsd.edu;https://research.adobe.com", + "aff_unique_abbr": "UCSD;Adobe", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "San Diego;", + "aff_country_unique_index": "0+0;0;0;0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.96", + "title": "MICO: A Multi-alternative Contrastive Learning Framework for Commonsense Knowledge Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Commonsense reasoning tasks such as commonsense knowledge graph completion and commonsense question answering require powerful representation learning. In this paper, we propose to learn commonsense knowledge representation by MICO, a Multi-alternative contrastIve learning framework on COmmonsense knowledge graphs (MICO). MICO generates the commonsense knowledge representation by contextual interaction between entity nodes and relations with multi-alternative contrastive learning. In MICO, the head and tail entities in an (h,r,t) knowledge triple are converted to two relation-aware sequence pairs (a premise and an alternative) in the form of natural language. Semantic representations generated by MICO can benefit the following two tasks by simply comparing the similarity score between the representations: 1) zero-shot commonsense question answering tasks; 2) inductive commonsense knowledge graph completion tasks. 
Extensive experiments show the effectiveness of our method.", + "author": "Ying Su; Zihao Wang; Tianqing Fang; Hongming Zhang; Yangqiu Song; Tong Zhang", + "authorids": "/y/ying-su/; /z/zihao-wang/; /t/tianqing-fang/; /h/hongming-zhang/; /y/yangqiu-song/; /t/tong-zhang/", + "bibtex": "@inproceedings{su-etal-2022-mico,\n title = \"{MICO}: A Multi-alternative Contrastive Learning Framework for Commonsense Knowledge Representation\",\n author = \"Su, Ying and\n Wang, Zihao and\n Fang, Tianqing and\n Zhang, Hongming and\n Song, Yangqiu and\n Zhang, Tong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.96/\",\n doi = \"10.18653/v1/2022.findings-emnlp.96\",\n pages = \"1339--1351\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.96.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.96/", + "pdf_size": 490547, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14460245624438587920&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "HKUST; HKUST; HKUST; Tencent AI lab, Seattle; HKUST; HKUST", + "aff_domain": "connect.ust.hk;connect.ust.hk;cse.ust.hk;global.tencent.com;cse.ust.hk;ust.hk", + "email": "connect.ust.hk;connect.ust.hk;cse.ust.hk;global.tencent.com;cse.ust.hk;ust.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;Tencent", + "aff_unique_dep": ";AI lab", + "aff_unique_url": "https://www.ust.hk;https://ai.tencent.com", + "aff_unique_abbr": "HKUST;Tencent AI lab", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Seattle", + "aff_country_unique_index": "0;0;0;1;0;0", 
+ "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.717", + "title": "MM-Align: Learning Optimal Transport-based Alignment Dynamics for Fast and Accurate Inference on Missing Modality Sequences", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing multimodal tasks mostly target at the complete input modality setting, i.e., each modality is either complete or completely missing in both training and test sets. However, the randomly missing situations have still been underexplored. In this paper, we present a novel approach named MM-Align to address the missing-modality inference problem. Concretely, we propose 1) an alignment dynamics learning module based on the theory of optimal transport (OT) for missing data imputation; 2) a denoising training algorithm to enhance the quality of imputation as well as the accuracy of model predictions. Compared with previous generative methods which devote to restoring the missing inputs, MM-Align learns to capture and imitate the alignment dynamics between modality sequences. 
Results of comprehensive experiments on two multimodal tasks empirically demonstrate that our method can perform more accurate and faster inference and alleviate the overfitting issue under different missing conditions.", + "author": "Wei Han; Hui Chen; Min-Yen Kan; Soujanya Poria", + "authorids": "/w/wei-han/; /h/hui-chen/; /m/min-yen-kan/; /s/soujanya-poria/", + "bibtex": "@inproceedings{han-etal-2022-mm,\n title = \"{MM}-Align: Learning Optimal Transport-based Alignment Dynamics for Fast and Accurate Inference on Missing Modality Sequences\",\n author = \"Han, Wei and\n Chen, Hui and\n Kan, Min-Yen and\n Poria, Soujanya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.717/\",\n doi = \"10.18653/v1/2022.emnlp-main.717\",\n pages = \"10498--10511\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.717.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.717/", + "pdf_size": 946502, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10559931420329271422&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/declare-lab/MM-Align", + "project": "", + "author_num": 4 + }, + { + "id": "2022.findings-emnlp.333", + "title": "MOBA-E2C: Generating MOBA Game Commentaries via Capturing Highlight Events from the Meta-Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "MOBA (Multiplayer Online Battle Arena) games such as Dota2 are currently one of the most popular e-sports gaming genres. Following professional commentaries is a great way to understand and enjoy a MOBA game. 
However, massive game competitions lack commentaries because of the shortage of professional human commentators. As an alternative, employing machine commentators that can work at any time and place is a feasible solution. Considering the challenges in modeling MOBA games, we propose a data-driven MOBA commentary generation framework, MOBA-E2C, allowing a model to generate commentaries based on the game meta-data. Subsequently, to alleviate the burden of collecting supervised data, we propose a MOBA-FuseGPT generator to generate MOBA game commentaries by fusing the power of a rule-based generator and a generative GPT generator. Finally, in the experiments, we take a popular MOBA game Dota2 as our case and construct a Chinese Dota2 commentary generation dataset Dota2-Commentary. Experimental results demonstrate the superior performance of our approach. To the best of our knowledge, this work is the first Dota2 machine commentator and Dota2-Commentary is the first dataset.", + "author": "Dawei Zhang; Sixing Wu; Yao Guo; Xiangqun Chen", + "authorids": "/d/dawei-zhang/; /s/sixing-wu/; /y/yao-guo/; /x/xiangqun-chen/", + "bibtex": "@inproceedings{zhang-etal-2022-moba,\n title = \"{MOBA}-{E}2{C}: Generating {MOBA} Game Commentaries via Capturing Highlight Events from the Meta-Data\",\n author = \"Zhang, Dawei and\n Wu, Sixing and\n Guo, Yao and\n Chen, Xiangqun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.333/\",\n doi = \"10.18653/v1/2022.findings-emnlp.333\",\n pages = \"4545--4556\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.333.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.333/", + "pdf_size": 4658685, + "gs_citation": 6, 
+ "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1469218112084608132&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Key Laboratory of High-Confidence Software Technologies (MOE), School of Computer Science, Peking University, Beijing, China + GamesMind Technology, Beijing, China; School of Computer Science, Peking University, Beijing, China + GamesMind Technology, Beijing, China; Key Laboratory of High-Confidence Software Technologies (MOE), School of Computer Science, Peking University, Beijing, China; Key Laboratory of High-Confidence Software Technologies (MOE), School of Computer Science, Peking University, Beijing, China", + "aff_domain": "pku.edu.cn; ; ; ", + "email": "pku.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0;0", + "aff_unique_norm": "Peking University;GamesMind Technology", + "aff_unique_dep": "School of Computer Science;", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.705", + "title": "MOCHA: A Multi-Task Training Approach for Coherent Text Generation from Cognitive Perspective", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Teaching neural models to generate narrative coherent texts is a critical problem. Recent pre-trained language models have achieved promising results, but there is still a gap between human written texts and machine-generated outputs. In this work, we propose a novel multi-task training strategy for long text generation grounded on the cognitive theory of writing, which empowers the model to learn essential subskills needed for writing including planning and reviewing besides end-to-end generation. 
We extensively evaluate our model on three open-ended generation tasks including story generation, news article writing and argument generation. Experiments show that our model achieves better results on both few-shot and fully-supervised settings than strong baselines, and human evaluations confirm that our model can generate more coherent outputs.", + "author": "Zhe Hu; Hou Pong Chan; Lifu Huang", + "authorids": "/z/zhe-hu/; /h/hou-pong-chan/; /l/lifu-huang/", + "bibtex": "@inproceedings{hu-etal-2022-mocha,\n title = \"{MOCHA}: A Multi-Task Training Approach for Coherent Text Generation from Cognitive Perspective\",\n author = \"Hu, Zhe and\n Chan, Hou Pong and\n Huang, Lifu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.705/\",\n doi = \"10.18653/v1/2022.emnlp-main.705\",\n pages = \"10324--10334\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.705.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.705/", + "pdf_size": 369479, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8122997601060581180&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Baidu Inc; University of Macau; Virginia Tech", + "aff_domain": "baidu.com;um.edu.mo;vt.edu", + "email": "baidu.com;um.edu.mo;vt.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Baidu;University of Macau;Virginia Tech", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.baidu.com;https://www.um.edu.mo;https://www.vt.edu", + "aff_unique_abbr": "Baidu;UM;VT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2", + 
"aff_country_unique": "China;Macau;United States" + }, + { + "id": "2022.emnlp-main.288", + "title": "MT-GenEval: A Counterfactual and Contextual Dataset for Evaluating Gender Accuracy in Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As generic machine translation (MT) quality has improved, the need for targeted benchmarks that explore fine-grained aspects of quality has increased. In particular, gender accuracy in translation can have implications in terms of output fluency, translation accuracy, and ethics. In this paper, we introduce MT-GenEval, a benchmark for evaluating gender accuracy in translation from English into eight widely-spoken languages. MT-GenEval complements existing benchmarks by providing realistic, gender-balanced, counterfactual data in eight language pairs where the gender of individuals is unambiguous in the input segment, including multi-sentence segments requiring inter-sentential gender agreement. Our data and code is publicly available under a CC BY SA 3.0 license.", + "author": "Anna Currey; Maria Nadejde; Raghavendra Reddy Pappagari; Mia Mayer; Stanislas Lauly; Xing Niu; Benjamin Hsu; Georgiana Dinu", + "authorids": "/a/anna-currey/; /m/maria-nadejde/; /r/raghavendra-reddy-pappagari/; /m/mia-mayer/; /s/stanislas-lauly/; /x/xing-niu/; /b/benjamin-hsu/; /g/georgiana-dinu/", + "bibtex": "@inproceedings{currey-etal-2022-mt,\n title = \"{MT}-{G}en{E}val: A Counterfactual and Contextual Dataset for Evaluating Gender Accuracy in Machine Translation\",\n author = \"Currey, Anna and\n Nadejde, Maria and\n Pappagari, Raghavendra Reddy and\n Mayer, Mia and\n Lauly, Stanislas and\n Niu, Xing and\n Hsu, Benjamin and\n Dinu, Georgiana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n 
publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.288/\",\n doi = \"10.18653/v1/2022.emnlp-main.288\",\n pages = \"4287--4299\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.288.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.288/", + "pdf_size": 277437, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=220940764187477723&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs; AWS AI Labs", + "aff_domain": "amazon.com; ; ; ; ; ; ; ", + "email": "amazon.com; ; ; ; ; ; ; ", + "github": "https://github.com/amazon-research/machine-translation-gender-eval", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon Web Services", + "aff_unique_dep": "AWS AI Labs", + "aff_unique_url": "https://aws.amazon.com", + "aff_unique_abbr": "AWS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.191", + "title": "MUSIED: A Benchmark for Event Detection from Multi-Source Heterogeneous Informal Texts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Event detection (ED) identifies and classifies event triggers from unstructured texts, serving as a fundamental task for information extraction. Despite the remarkable progress achieved in the past several years, most research efforts focus on detecting events from formal texts (e.g., news articles, Wikipedia documents, financial announcements). Moreover, the texts in each dataset are either from a single source or multiple yet relatively homogeneous sources. 
With massive amounts of user-generated text accumulating on the Web and inside enterprises, identifying meaningful events in these informal texts, usually from multiple heterogeneous sources, has become a problem of significant practical value. As a pioneering exploration that expands event detection to the scenarios involving informal and heterogeneous texts, we propose a new large-scale Chinese event detection dataset based on user reviews, text conversations, and phone conversations in a leading e-commerce platform for food service. We carefully investigate the proposed dataset\u2019s textual informality and multi-domain heterogeneity characteristics by inspecting data samples quantitatively and qualitatively. Extensive experiments with state-of-the-art event detection methods verify the unique challenges posed by these characteristics, indicating that multi-domain informal event detection remains an open problem and requires further efforts. Our benchmark and code are released at https://github.com/myeclipse/MUSIED.", + "author": "Xiangyu Xi; Jianwei Lv; Shuaipeng Liu; Wei Ye; Fan Yang; Guanglu Wan", + "authorids": "/x/xiangyu-xi/; /j/jianwei-lv/; /s/shuaipeng-liu/; /w/wei-ye/; /f/fan-yang/; /g/guanglu-wan/", + "bibtex": "@inproceedings{xi-etal-2022-musied,\n title = \"{MUSIED}: A Benchmark for Event Detection from Multi-Source Heterogeneous Informal Texts\",\n author = \"Xi, Xiangyu and\n Lv, Jianwei and\n Liu, Shuaipeng and\n Ye, Wei and\n Yang, Fan and\n Wan, Guanglu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.191/\",\n doi = \"10.18653/v1/2022.emnlp-main.191\",\n pages = \"2947--2964\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.191.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.191/", + "pdf_size": 764932, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13324957876997698230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Meituan Group, Beijing, China; Meituan Group, Beijing, China; Meituan Group, Beijing, China + National Engineering Research Center for Software Engineering, Peking University, Beijing, China; National Engineering Research Center for Software Engineering, Peking University, Beijing, China; Meituan Group, Beijing, China; Meituan Group, Beijing, China", + "aff_domain": "meituan.com; ;pku.edu.cn; ; ; ", + "email": "meituan.com; ;pku.edu.cn; ; ; ", + "github": "https://github.com/myeclipse/MUSIED", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0+1;1;0;0", + "aff_unique_norm": "Meituan Group;Peking University", + "aff_unique_dep": ";National Engineering Research Center for Software Engineering", + "aff_unique_url": "https://www.meituan.com;http://www.pku.edu.cn", + "aff_unique_abbr": "Meituan;PKU", + "aff_campus_unique_index": "0;0;0+0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.230", + "title": "Machine Translation Robustness to Natural Asemantic Variation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current Machine Translation (MT) models still struggle with more challenging input, such as noisy data and tail-end words and phrases. Several works have addressed this robustness issue by identifying specific categories of noise and variation then tuning models to perform better on them. An important yet under-studied category involves minor variations in nuance (non-typos) that preserve meaning w.r.t. the target language. 
We introduce and formalize this category as Natural Asemantic Variation (NAV) and investigate it in the context of MT robustness. We find that existing MT models fail when presented with NAV data, but we demonstrate strategies to improve performance on NAV by fine-tuning them with human-generated variations. We also show that NAV robustness can be transferred across languages and find that synthetic perturbations can achieve some but not all of the benefits of organic NAV data.", + "author": "Jacob Bremerman; Xiang Ren; Jonathan May", + "authorids": "/j/jacob-bremerman/; /x/xiang-ren/; /j/jonathan-may/", + "bibtex": "@inproceedings{bremerman-etal-2022-machine,\n title = \"Machine Translation Robustness to Natural Asemantic Variation\",\n author = \"Bremerman, Jacob and\n Ren, Xiang and\n May, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.230/\",\n doi = \"10.18653/v1/2022.emnlp-main.230\",\n pages = \"3517--3532\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.230.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.230/", + "pdf_size": 869341, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4207440492011066001&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Southern California; University of Southern California; University of Southern California", + "aff_domain": "usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + 
"aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.8", + "title": "Machine translation impact in E-commerce multilingual search", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Previous work suggests that performance of cross-lingual information retrieval correlates highly with the quality of Machine Translation. However, there may be a threshold beyond which improving query translation quality yields little or no benefit to further improve the retrieval performance. This threshold may depend upon multiple factors including the source and target languages, the existing MT system quality and the search pipeline. In order to identify the benefit of improving an MT system for a given search pipeline, we investigate the sensitivity of retrieval quality to the presence of different levels of MT quality using experimental datasets collected from actual traffic. We systematically improve the performance of our MT systems quality on language pairs as measured by MT evaluation metrics including Bleu and Chrf to determine their impact on search precision metrics and extract signals that help to guide the improvement strategies. 
Using this information we develop techniques to compare query translations for multiple language pairs and identify the most promising language pairs to invest and improve.", + "author": "Bryan Zhang; Amita Misra", + "authorids": "/b/bryan-zhang/; /a/amita-misra/", + "bibtex": "@inproceedings{zhang-misra-2022-machine,\n title = \"Machine translation impact in {E}-commerce multilingual search\",\n author = \"Zhang, Bryan and\n Misra, Amita\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.8/\",\n doi = \"10.18653/v1/2022.emnlp-industry.8\",\n pages = \"99--109\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.8.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.8/", + "pdf_size": 491402, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11961032473711862416&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Amazon.com; Amazon.com", + "aff_domain": "amazon.com;amazon.com", + "email": "amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.82", + "title": "Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (LMs) struggle with consistent reasoning; recently, prompting LMs to generate explanations that self-guide the 
inference has emerged as a promising direction to amend this. However, these approaches are fundamentally bounded by the correctness of explanations, which themselves are often noisy and inconsistent. In this work, we develop Maieutic Prompting, which aims to infer a correct answer to a question even from the unreliable generations of LM. Maieutic Prompting induces a tree of explanations abductively (e.g. X is true, because ...) and recursively, then frames the inference as a satisfiability problem over these explanations and their logical relations. We test Maieutic Prompting for true/false QA on three challenging benchmarks that require complex commonsense reasoning. Maieutic Prompting achieves up to 20% better accuracy than state-of-the-art prompting methods, and as a fully unsupervised approach, performs competitively with supervised models. We also show that Maieutic Prompting improves robustness in inference while providing interpretable rationales.", + "author": "Jaehun Jung; Lianhui Qin; Sean Welleck; Faeze Brahman; Chandra Bhagavatula; Ronan Le Bras; Yejin Choi", + "authorids": "/j/jaehun-jung/; /l/lianhui-qin/; /s/sean-welleck/; /f/faeze-brahman/; /c/chandra-bhagavatula/; /r/ronan-le-bras/; /y/yejin-choi/", + "bibtex": "@inproceedings{jung-etal-2022-maieutic,\n title = \"Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations\",\n author = \"Jung, Jaehun and\n Qin, Lianhui and\n Welleck, Sean and\n Brahman, Faeze and\n Bhagavatula, Chandra and\n Le Bras, Ronan and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.82/\",\n doi = \"10.18653/v1/2022.emnlp-main.82\",\n pages = \"1266--1279\"\n}", + 
"pdf": "https://aclanthology.org/2022.emnlp-main.82.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.82/", + "pdf_size": 2440762, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15306954979170354979&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 7, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington+Allen Institute for Artificial Intelligence; Paul G. Allen School of Computer Science & Engineering, University of Washington+Allen Institute for Artificial Intelligence; Paul G. Allen School of Computer Science & Engineering, University of Washington+Allen Institute for Artificial Intelligence; Paul G. Allen School of Computer Science & Engineering, University of Washington+Allen Institute for Artificial Intelligence; Allen Institute for Artificial Intelligence; Allen Institute for Artificial Intelligence; Paul G. Allen School of Computer Science & Engineering, University of Washington+Allen Institute for Artificial Intelligence", + "aff_domain": "cs.washington.edu; ; ; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ; ; ", + "github": "https://github.com/jaehunjung1/Maieutic-Prompting", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;1;1;0+1", + "aff_unique_norm": "University of Washington;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Paul G. 
Allen School of Computer Science & Engineering;", + "aff_unique_url": "https://www.washington.edu;https://allenai.org", + "aff_unique_abbr": "UW;AI2", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.217", + "title": "Making Pretrained Language Models Good Long-tailed Learners", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt-tuning has shown appealing performance in few-shot classification by virtue of its capability in effectively exploiting pre-trained knowledge. This motivates us to check the hypothesis that prompt-tuning is also a promising choice for long-tailed classification, since the tail classes are intuitively few-shot ones. To achieve this aim, we conduct empirical studies to examine the hypothesis. The results demonstrate that prompt-tuning makes pretrained language models at least good long-tailed learners. For intuitions on why prompt-tuning can achieve good performance in long-tailed classification, we carry out in-depth analyses by progressively bridging the gap between prompt-tuning and commonly used finetuning. The summary is that the classifier structure and parameterization form the key to making good long-tailed learners, in comparison with the less important input structure. 
Finally, we verify the applicability of our finding to few-shot classification.", + "author": "Chen Zhang; Lei Ren; Jingang Wang; Wei Wu; Dawei Song", + "authorids": "/c/chen-zhang/; /l/lei-ren/; /j/jingang-wang/; /w/wei-wu/; /d/dawei-song/", + "bibtex": "@inproceedings{zhang-etal-2022-making,\n title = \"Making Pretrained Language Models Good Long-tailed Learners\",\n author = \"Zhang, Chen and\n Ren, Lei and\n Wang, Jingang and\n Wu, Wei and\n Song, Dawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.217/\",\n doi = \"10.18653/v1/2022.emnlp-main.217\",\n pages = \"3298--3312\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.217.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.217/", + "pdf_size": 780617, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9117915226887696815&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Beijing Institute of Technology + Meituan NLP; Meituan NLP; Beijing Institute of Technology", + "aff_domain": "bit.edu.cn;163.com;meituan.com;meituan.com;bit.edu.cn", + "email": "bit.edu.cn;163.com;meituan.com;meituan.com;bit.edu.cn", + "github": "https://github.com/GeneZC/Glee", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0+1;1;0", + "aff_unique_norm": "Beijing Institute of Technology;Meituan", + "aff_unique_dep": ";NLP", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.meituan.com", + "aff_unique_abbr": "BIT;Meituan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { 
+ "id": "2022.emnlp-main.724", + "title": "Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Lay summarisation aims to jointly summarise and simplify a given text, thus making its content more comprehensible to non-experts.Automatic approaches for lay summarisation can provide significant value in broadening access to scientific literature, enabling a greater degree of both interdisciplinary knowledge sharing and public understanding when it comes to research findings. However, current corpora for this task are limited in their size and scope, hindering the development of broadly applicable data-driven approaches. Aiming to rectify these issues, we present two novel lay summarisation datasets, PLOS (large-scale) and eLife (medium-scale), each of which contains biomedical journal articles alongside expert-written lay summaries.We provide a thorough characterisation of our lay summaries, highlighting differing levels of readability and abstractivenessbetween datasets that can be leveraged to support the needs of different applications.Finally, we benchmark our datasets using mainstream summarisation approaches and perform a manual evaluation with domain experts, demonstrating their utility and casting light on the key challenges of this task.", + "author": "Tomas Goldsack; Zhihao Zhang; Chenghua Lin; Carolina Scarton", + "authorids": "/t/tomas-goldsack/; /z/zhihao-zhang/; /c/chenghua-lin/; /c/carolina-scarton/", + "bibtex": "@inproceedings{goldsack-etal-2022-making,\n title = \"Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature\",\n author = \"Goldsack, Tomas and\n Zhang, Zhihao and\n Lin, Chenghua and\n Scarton, Carolina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = 
\"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.724/\",\n doi = \"10.18653/v1/2022.emnlp-main.724\",\n pages = \"10589--10604\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.724.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.724/", + "pdf_size": 691459, + "gs_citation": 70, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8073933195845920099&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Department of Computer Science, University of Sheffield, UK; School of Economics and Management, Beihang University, China; Department of Computer Science, University of Sheffield, UK; Department of Computer Science, University of Sheffield, UK", + "aff_domain": "sheffield.ac.uk;buaa.edu.cn;sheffield.ac.uk;sheffield.ac.uk", + "email": "sheffield.ac.uk;buaa.edu.cn;sheffield.ac.uk;sheffield.ac.uk", + "github": "https://github.com/TGoldsack1/Corpora_for_Lay_Summarisation", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Sheffield;Beihang University", + "aff_unique_dep": "Department of Computer Science;School of Economics and Management", + "aff_unique_url": "https://www.sheffield.ac.uk;http://www.buaa.edu.cn", + "aff_unique_abbr": "Sheffield;Beihang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United Kingdom;China" + }, + { + "id": "2022.emnlp-main.298", + "title": "MasakhaNER 2.0: Africa-centric Transfer Learning for Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "African languages are spoken by over a billion people, but they are under-represented in NLP research and development. 
Multiple challenges exist, including the limited availability of annotated training and evaluation datasets as well as the lack of understanding of which settings, languages, and recently proposed methods like cross-lingual transfer will be effective. In this paper, we aim to move towards solutions for these challenges, focusing on the task of named entity recognition (NER). We present the creation of the largest to-date human-annotated NER dataset for 20 African languages. We study the behaviour of state-of-the-art cross-lingual transfer methods in an Africa-centric setting, empirically demonstrating that the choice of source transfer language significantly affects performance. While much previous work defaults to using English as the source language, our results show that choosing the best transfer language improves zero-shot F1 scores by an average of 14% over 20 languages as compared to using English.", + "author": "David Ifeoluwa Adelani; Graham Neubig; Sebastian Ruder; Shruti Rijhwani; Michael Beukman; Chester Palen-Michel; Constantine Lignos; Jesujoba O. Alabi; Shamsuddeen H. Muhammad; Peter Nabende; Cheikh M. Bamba Dione; Andiswa Bukula; Rooweither Mabuya; Bonaventure F. P. Dossou; Blessing Sibanda; Happy Buzaaba; Jonathan Mukiibi; Godson Kalipe; Derguene Mbaye; Amelia Taylor; Fatoumata Kabore; Chris Chinenye Emezue; Anuoluwapo Aremu; Perez Ogayo; Catherine Gitau; Edwin Munkoh-Buabeng; Victoire Memdjokam Koagne; Allahsera Auguste Tapo; Tebogo Macucwa; Vukosi Marivate; Elvis Mboning; Tajuddeen Gwadabe; Tosin Adewumi; Orevaoghene Ahia; Joyce Nakatumba-Nabende; Neo L. Mokono; Ignatius Ezeani; Chiamaka Chukwuneke; Mofetoluwa Adeyemi; Gilles Q. 
Hacheme; Idris Abdulmumin; Odunayo Ogundepo; Oreen Yousuf; Tatiana Moteu Ngoli; Dietrich Klakow", + "authorids": "/d/david-ifeoluwa-adelani/; /g/graham-neubig/; /s/sebastian-ruder/; /s/shruti-rijhwani/; /m/michael-beukman/; /c/chester-palen-michel/; /c/constantine-lignos/; /j/jesujoba-alabi/; /s/shamsuddeen-h-muhammad/; /p/peter-nabende/; /c/cheikh-m-bamba-dione/; /a/andiswa-bukula/; /r/rooweither-mabuya/; /b/bonaventure-f-p-dossou/; /b/blessing-sibanda/; /h/happy-buzaaba/; /j/jonathan-mukiibi/; /g/godson-kalipe/; /d/derguene-mbaye/; /a/amelia-taylor/; /f/fatoumata-kabore/; /c/chris-chinenye-emezue/; /a/anuoluwapo-aremu/; /p/perez-ogayo/; /c/catherine-gitau/; /e/edwin-munkoh-buabeng/; /v/victoire-memdjokam-koagne/; /a/allahsera-auguste-tapo/; /t/tebogo-macucwa/; /v/vukosi-marivate/; /e/elvis-mboning/; /t/tajuddeen-gwadabe/; /t/tosin-adewumi/; /o/orevaoghene-ahia/; /j/joyce-nakatumba-nabende/; /n/neo-l-mokono/; /i/ignatius-ezeani/; /c/chiamaka-chukwuneke/; /m/mofetoluwa-adeyemi/; /g/gilles-q-hacheme/; /i/idris-abdulmumin/; /o/odunayo-ogundepo/; /o/oreen-yousuf/; /t/tatiana-moteu-ngoli/; /d/dietrich-klakow/", + "bibtex": "@inproceedings{adelani-etal-2022-masakhaner,\n title = \"{M}asakha{NER} 2.0: {A}frica-centric Transfer Learning for Named Entity Recognition\",\n author = \"Adelani, David Ifeoluwa and\n Neubig, Graham and\n Ruder, Sebastian and\n Rijhwani, Shruti and\n Beukman, Michael and\n Palen-Michel, Chester and\n Lignos, Constantine and\n Alabi, Jesujoba O. and\n Muhammad, Shamsuddeen H. and\n Nabende, Peter and\n Dione, Cheikh M. Bamba and\n Bukula, Andiswa and\n Mabuya, Rooweither and\n Dossou, Bonaventure F. P. 
and\n Sibanda, Blessing and\n Buzaaba, Happy and\n Mukiibi, Jonathan and\n Kalipe, Godson and\n Mbaye, Derguene and\n Taylor, Amelia and\n Kabore, Fatoumata and\n Emezue, Chris Chinenye and\n Aremu, Anuoluwapo and\n Ogayo, Perez and\n Gitau, Catherine and\n Munkoh-Buabeng, Edwin and\n Memdjokam Koagne, Victoire and\n Tapo, Allahsera Auguste and\n Macucwa, Tebogo and\n Marivate, Vukosi and\n Mboning, Elvis and\n Gwadabe, Tajuddeen and\n Adewumi, Tosin and\n Ahia, Orevaoghene and\n Nakatumba-Nabende, Joyce and\n Mokono, Neo L. and\n Ezeani, Ignatius and\n Chukwuneke, Chiamaka and\n Adeyemi, Mofetoluwa and\n Hacheme, Gilles Q. and\n Abdulmumin, Idris and\n Ogundepo, Odunayo and\n Yousuf, Oreen and\n Moteu Ngoli, Tatiana and\n Klakow, Dietrich\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.298/\",\n doi = \"10.18653/v1/2022.emnlp-main.298\",\n pages = \"4488--4508\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.298.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.298/", + "pdf_size": 981646, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=846082181623495942&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "github": "", + "project": "", + "author_num": 45 + }, + { + "id": "2022.findings-emnlp.106", + "title": "Mask More and Mask Later: Efficient Pre-training of Masked Language Models by Disentangling the [MASK] Token", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The 
pre-training of masked language models (MLMs) consumes massive computation to achieve good results on downstream NLP tasks, resulting in a large carbon footprint. In the vanilla MLM, the virtual tokens, [MASK]s, act as placeholders and gather the contextualized information from unmasked tokens to restore the corrupted information. It raises the question of whether we can append [MASK]s at a later layer, to reduce the sequence length for earlier layers and make the pre-training more efficient. We show: (1) [MASK]s can indeed be appended at a later layer, being disentangled from the word embedding; (2) The gathering of contextualized information from unmasked tokens can be conducted with a few layers. By further increasing the masking rate from 15% to 50%, we can pre-train RoBERTa-base and RoBERTa-large from scratch with only 78% and 68% of the original computational budget without any degradation on the GLUE benchmark. When pre-training with the original budget, our method outperforms RoBERTa for 6 out of 8 GLUE tasks, on average by 0.4%.", + "author": "Baohao Liao; David Thulke; Sanjika Hewavitharana; Hermann Ney; Christof Monz", + "authorids": "/b/baohao-liao/; /d/david-thulke/; /s/sanjika-hewavitharana/; /h/hermann-ney/; /c/christof-monz/", + "bibtex": "@inproceedings{liao-etal-2022-mask,\n title = \"Mask More and Mask Later: Efficient Pre-training of Masked Language Models by Disentangling the [{MASK}] Token\",\n author = \"Liao, Baohao and\n Thulke, David and\n Hewavitharana, Sanjika and\n Ney, Hermann and\n Monz, Christof\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.106/\",\n doi = \"10.18653/v1/2022.findings-emnlp.106\",\n pages = 
\"1478--1492\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.106.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.106/", + "pdf_size": 411318, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4523632462802768910&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of Amsterdam; RWTH Aachen University; eBay + University of Amsterdam; RWTH Aachen University; University of Amsterdam", + "aff_domain": "uva.nl;hltpr.rwth-aachen.de;ebay.com;informatik.rwth-aachen.de;uva.nl", + "email": "uva.nl;hltpr.rwth-aachen.de;ebay.com;informatik.rwth-aachen.de;uva.nl", + "github": "https://github.com/BaohaoLiao/3mlx1m2m3x4x5x1x4 x5m2m3x1x4 x5m2m3/token & positional information/positional information", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2+0;1;0", + "aff_unique_norm": "University of Amsterdam;RWTH Aachen University;eBay Inc.", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.uva.nl;https://www.rwth-aachen.de;https://www.ebay.com", + "aff_unique_abbr": "UvA;RWTH;eBay", + "aff_campus_unique_index": "1;;1", + "aff_campus_unique": ";Aachen", + "aff_country_unique_index": "0;1;2+0;1;0", + "aff_country_unique": "Netherlands;Germany;United States" + }, + { + "id": "2022.emnlp-main.708", + "title": "Mask the Correct Tokens: An Embarrassingly Simple Approach for Error Correction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text error correction aims to correct the errors in text sequences such as those typed by humans or generated by speech recognition models.Previous error correction methods usually take the source (incorrect) sentence as encoder input and generate the target (correct) sentence through the decoder. 
Since the error rate of the incorrect sentence is usually low (e.g., 10%), the correction model can only learn to correct on limited error tokens but trivially copy on most tokens (correct tokens), which harms the effective training of error correction. In this paper, we argue that the correct tokens should be better utilized to facilitate effective training and then propose a simple yet effective masking strategy to achieve this goal.Specifically, we randomly mask out a part of the correct tokens in the source sentence and let the model learn to not only correct the original error tokens but also predict the masked tokens based on their context information. Our method enjoys several advantages: 1) it alleviates trivial copy; 2) it leverages effective training signals from correct tokens; 3) it is a plug-and-play module and can be applied to different models and tasks. Experiments on spelling error correction and speech recognition error correction on Mandarin datasets and grammar error correction on English datasets with both autoregressive and non-autoregressive generation models show that our method improves the correctionaccuracy consistently.", + "author": "Kai Shen; Yichong Leng; Xu Tan; Siliang Tang; Yuan Zhang; Wenjie Liu; Edward Lin", + "authorids": "/k/kai-shen/; /y/yichong-leng/; /x/xu-tan/; /s/siliang-tang/; /y/yuan-zhang/; /w/wenjie-liu/; /e/edward-lin/", + "bibtex": "@inproceedings{shen-etal-2022-mask,\n title = \"Mask the Correct Tokens: An Embarrassingly Simple Approach for Error Correction\",\n author = \"Shen, Kai and\n Leng, Yichong and\n Tan, Xu and\n Tang, Siliang and\n Zhang, Yuan and\n Liu, Wenjie and\n Lin, Edward\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.708/\",\n doi = \"10.18653/v1/2022.emnlp-main.708\",\n pages = \"10367--10380\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.708.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.708/", + "pdf_size": 580622, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14031051756091394652&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Zhejiang University; University of Science and Technology of China; Microsoft Research Asia; Microsoft Azure Speech; Microsoft Azure Speech; Microsoft Azure Speech; Microsoft Azure Speech", + "aff_domain": "zju.edu.cn;zju.edu.cn;mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "zju.edu.cn;zju.edu.cn;mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/NeuralSpeech", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;3;3;3", + "aff_unique_norm": "Zhejiang University;University of Science and Technology of China;Microsoft Research;Microsoft Corporation", + "aff_unique_dep": ";;Research;Azure Speech", + "aff_unique_url": "https://www.zju.edu.cn;http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.microsoft.com", + "aff_unique_abbr": "ZJU;USTC;MSR Asia;Microsoft", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;1;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.332", + "title": "Mask-then-Fill: A Flexible and Effective Data Augmentation Framework for Event Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present Mask-then-Fill, a flexible and effective data augmentation framework for event extraction. 
Our approach allows for more flexible manipulation of text and thus can generate more diverse data while keeping the original event structure unchanged as much as possible. Specifically, it first randomly masks out an adjunct sentence fragment and then infills a variable-length text span with a fine-tuned infilling model. The main advantage lies in that it can replace a fragment of arbitrary length in the text with another fragment of variable length, compared to the existing methods which can only replace a single word or a fixed-length fragment. On trigger and argument extraction tasks, the proposed framework is more effective than baseline methods and it demonstrates particularly strong results in the low-resource setting. Our further analysis shows that it achieves a good balance between diversity and distributional similarity.", + "author": "Jun Gao; Changlong Yu; Wei Wang; Huan Zhao; Ruifeng Xu", + "authorids": "/j/jun-gao/; /c/changlong-yu/; /w/wei-wang/; /h/huan-zhao/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{gao-etal-2022-mask,\n title = \"Mask-then-Fill: A Flexible and Effective Data Augmentation Framework for Event Extraction\",\n author = \"Gao, Jun and\n Yu, Changlong and\n Wang, Wei and\n Zhao, Huan and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.332/\",\n doi = \"10.18653/v1/2022.findings-emnlp.332\",\n pages = \"4537--4544\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.332.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.332/", + "pdf_size": 369492, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5218612373833580772&as_sdt=5,44&sciodt=0,44&hl=en", + 
"gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.233", + "title": "Masked Language Models Know Which are Popular: A Simple Ranking Strategy for Commonsense Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose a simple ranking strategy to solve a generative commonsense question answering (QA) problem. Compared with multiple-choice QA, it is challenging because the answers to a question are not unique and they are supposed to be popular and diverse. Our strategy exploits the dataset itself and negative samples that we collect from WordNet to train a ranker that picks out the most popular answers for commonsense questions. The effectiveness of our strategy is verified on different pre-trained masked language models (MLMs) in a pipeline framework, where an MLM reranks the generated answers. Further, we explore an end-to-end framework where MLMs are utilized to guide the generation of generative language models (GLMs). Taking advantage of reinforcement learning, we apply policy gradient to train a GLM with the rewards fed back by an MLM. 
Empirical results on ProtoQA dataset demonstrate that MLMs can acquire the ability to distinguish the popular answers and improve the typical answer generation of GLMs as well.", + "author": "Xuan Luo; Chuang Fan; Yice Zhang; Wanguo Jiang; Bing Qin; Ruifeng Xu", + "authorids": "/x/xuan-luo/; /c/chuang-fan/; /y/yice-zhang/; /w/wanguo-jiang/; /b/bing-qin/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{luo-etal-2022-masked,\n title = \"Masked Language Models Know Which are Popular: A Simple Ranking Strategy for Commonsense Question Answering\",\n author = \"Luo, Xuan and\n Fan, Chuang and\n Zhang, Yice and\n Jiang, Wanguo and\n Qin, Bing and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.233/\",\n doi = \"10.18653/v1/2022.findings-emnlp.233\",\n pages = \"3200--3213\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.233.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.233/", + "pdf_size": 764641, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18259857292629149165&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Harbin Insitute of Technology+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Merchants Securities Co., LTD.; Harbin Insitute of Technology+Peng Cheng Laboratory+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology+Peng Cheng Laboratory+Guangdong 
Provincial Key Laboratory of Novel Security Intelligence Technologies", + "aff_domain": "hotmail.com;gmail.com;163.com;cmschina.com.cn;ir.hit.edu.cn;hit.edu.cn", + "email": "hotmail.com;gmail.com;163.com;cmschina.com.cn;ir.hit.edu.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0+1;2;0+3+1;0+3+1", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Merchants Securities Co., LTD.;Peng Cheng Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;", + "aff_unique_url": "http://www.hit.edu.cn/;;;http://www.pcl.ac.cn", + "aff_unique_abbr": "HIT;;;PCL", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0+0;0+0;0+0;1;0+0+0;0+0+0", + "aff_country_unique": "China;Unknown" + }, + { + "id": "2022.findings-emnlp.146", + "title": "MatRank: Text Re-ranking by Latent Preference Matrix", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text ranking plays a key role in providing content that best answers user queries. It is usually divided into two sub-tasks to perform efficient information retrieval given a query: text retrieval and text re-ranking. Recent research on pretrained language models (PLM) has demonstrated efficiency and gain on both sub-tasks. However, while existing methods have benefited from pre-trained language models and achieved high recall rates on passage retrieval, the ranking performance still demands further improvement. In this paper, we propose MatRank, which learns to re-rank the text retrieved for a given query by learning to predict the most relevant passage based on a latent preference matrix. Specifically, MatRank uses a PLM to generate an asymmetric latent matrix of relative preference scores between all pairs of retrieved passages. 
Then, the latent matrix is aggregated row-wise and column-wise to obtain global preferences and predictions of the most relevant passage in two of these directions, respectively. We conduct extensive experiments on MS MACRO, WikiAQ, and SemEval datasets. Experimental results show that MatRank has achieved new state-of-the-art results on these datasets, outperforming all prior methods on ranking performance metrics.", + "author": "Jinwen Luo; Jiuding Yang; Weidong Guo; Chenglin Li; Di Niu; Yu Xu", + "authorids": "/j/jinwen-luo/; /j/jiuding-yang/; /w/weidong-guo/; /c/chenglin-li/; /d/di-niu/; /y/yu-xu/", + "bibtex": "@inproceedings{luo-etal-2022-matrank,\n title = \"{M}at{R}ank: Text Re-ranking by Latent Preference Matrix\",\n author = \"Luo, Jinwen and\n Yang, Jiuding and\n Guo, Weidong and\n Li, Chenglin and\n Niu, Di and\n Xu, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.146/\",\n doi = \"10.18653/v1/2022.findings-emnlp.146\",\n pages = \"2011--2023\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.146.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.146/", + "pdf_size": 1043968, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10145286480387773907&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Platform and Content Group, Tencent; University of Alberta; Platform and Content Group, Tencent; University of Alberta; University of Alberta; Platform and Content Group, Tencent", + "aff_domain": "tencent.com;ualberta.ca;tencent.com;ualberta.ca;ualberta.ca;tencent.com", + "email": "tencent.com;ualberta.ca;tencent.com;ualberta.ca;ualberta.ca;tencent.com", + "github": "", + 
"project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;1;1;0", + "aff_unique_norm": "Tencent;University of Alberta", + "aff_unique_dep": "Platform and Content Group;", + "aff_unique_url": "https://www.tencent.com;https://www.ualberta.ca", + "aff_unique_abbr": "Tencent;UAlberta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1;1;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.emnlp-main.537", + "title": "MatchPrompt: Prompt-based Open Relation Extraction with Semantic Consistency Guided Clustering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relation clustering is a general approach for open relation extraction (OpenRE). Current methods have two major problems. One is that their good performance relies on large amounts of labeled and pre-defined relational instances for pre-training, which are costly to acquire in reality. The other is that they only focus on learning a high-dimensional metric space to measure the similarity of novel relations and ignore the specific relational representations of clusters. In this work, we propose a new prompt-based framework named MatchPrompt, which can realize OpenRE with efficient knowledge transfer from only a few pre-defined relational instances as well as mine the specific meanings for cluster interpretability. To our best knowledge, we are the first to introduce a prompt-based framework for unlabeled clustering. 
Experimental results on different datasets show that MatchPrompt achieves the new SOTA results for OpenRE.", + "author": "Jiaxin Wang; Lingling Zhang; Jun Liu; Xi Liang; Yujie Zhong; Yaqiang Wu", + "authorids": "/j/jiaxin-wang/; /l/lingling-zhang/; /j/jun-liu/; /x/xi-liang/; /y/yujie-zhong/; /y/yaqiang-wu/", + "bibtex": "@inproceedings{wang-etal-2022-matchprompt,\n title = \"{M}atch{P}rompt: Prompt-based Open Relation Extraction with Semantic Consistency Guided Clustering\",\n author = \"Wang, Jiaxin and\n Zhang, Lingling and\n Liu, Jun and\n Liang, Xi and\n Zhong, Yujie and\n Wu, Yaqiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.537/\",\n doi = \"10.18653/v1/2022.emnlp-main.537\",\n pages = \"7875--7888\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.537.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.537/", + "pdf_size": 807054, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1820340360661904663&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.259", + "title": "McPhraSy: Multi-Context Phrase Similarity and Clustering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Phrase similarity is a key component of many NLP applications. Current phrase similarity methods focus on embedding the phrase itself and use the phrase context only during training of the pretrained model. 
To better leverage the information in the context, we propose McPhraSy (Multi-context Phrase Similarity), a novel algorithm for estimating the similarity of phrases based on multiple contexts. At inference time, McPhraSy represents each phrase by considering multiple contexts in which it appears and computes the similarity of two phrases by aggregating the pairwise similarities between the contexts of the phrases. Incorporating context during inference enables McPhraSy to outperform current state-of-the-art models on two phrase similarity datasets by up to 13.3%. Finally, we also present a new downstream task that relies on phrase similarity \u2013 keyphrase clustering \u2013 and create a new benchmark for it in the product reviews domain. We show that McPhraSy surpasses all other baselines for this task.", + "author": "Amir Cohen; Hila Gonen; Ori Shapira; Ran Levy; Yoav Goldberg", + "authorids": "/a/amir-cohen/; /h/hila-gonen/; /o/ori-shapira/; /r/ran-levy/; /y/yoav-goldberg/", + "bibtex": "@inproceedings{cohen-etal-2022-mcphrasy,\n title = \"{M}c{P}hra{S}y: Multi-Context Phrase Similarity and Clustering\",\n author = \"Cohen, Amir and\n Gonen, Hila and\n Shapira, Ori and\n Levy, Ran and\n Goldberg, Yoav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.259/\",\n doi = \"10.18653/v1/2022.findings-emnlp.259\",\n pages = \"3538--3550\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.259.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.259/", + "pdf_size": 589368, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13165650922621948212&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": 
"Bar-Ilan University+Amazon; University of Washington+Amazon; Amazon; Amazon; Bar-Ilan University+Allen Institute for Artificial Intelligence", + "aff_domain": "gmail.com;gmail.com;amazon.com;amazon.com;gmail.com", + "email": "gmail.com;gmail.com;amazon.com;amazon.com;gmail.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2+1;1;1;0+3", + "aff_unique_norm": "Bar-Ilan University;Amazon.com, Inc.;University of Washington;Allen Institute for Artificial Intelligence", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.biu.ac.il;https://www.amazon.com;https://www.washington.edu;https://allenai.org", + "aff_unique_abbr": "BIU;Amazon;UW;AI2", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1+1;1;1;0+1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.emnlp-main.320", + "title": "McQueen: a Benchmark for Multimodal Conversational Query Rewrite", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of query rewrite aims to convert an in-context query to its fully-specified version where ellipsis and coreference are completed and referred-back according to the history context. Although much progress has been made, less efforts have been paid to real scenario conversations that involve drawing information from more than one modalities. In this paper, we propose the task of multimodal conversational query rewrite (McQR), which performs query rewrite under the multimodal visual conversation setting. We collect a large-scale dataset named McQueen based on manual annotation, which contains 15k visual conversations and over 80k queries where each one is associated with a fully-specified rewrite version. In addition, for entities appearing in the rewrite, we provide the corresponding image box annotation. 
We then use the McQueen dataset to benchmark a state-of-the-art method for effectively tackling the McQR task, which is based on a multimodal pre-trained model with pointer generator. Extensive experiments are performed to demonstrate the effectiveness of our model on this task.", + "author": "Yifei Yuan; Chen Shi; Runze Wang; Liyi Chen; Feijun Jiang; Yuan You; Wai Lam", + "authorids": "/y/yifei-yuan/; /c/chen-shi/; /r/runze-wang/; /l/liyi-chen/; /f/feijun-jiang/; /y/yuan-you/; /w/wai-lam/", + "bibtex": "@inproceedings{yuan-etal-2022-mcqueen,\n title = \"{M}c{Q}ueen: a Benchmark for Multimodal Conversational Query Rewrite\",\n author = \"Yuan, Yifei and\n Shi, Chen and\n Wang, Runze and\n Chen, Liyi and\n Jiang, Feijun and\n You, Yuan and\n Lam, Wai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.320/\",\n doi = \"10.18653/v1/2022.emnlp-main.320\",\n pages = \"4834--4844\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.320.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.320/", + "pdf_size": 3958702, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14416348503315030524&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "The Chinese University of Hong Kong; Alibaba Group; Alibaba Group; Nankai University; Alibaba Group; Alibaba Group; The Chinese University of Hong Kong", + "aff_domain": "se.cuhk.edu.hk;alibaba-inc.com;alibaba-inc.com;mail.nankai.edu.cn;alibaba-inc.com;alibaba-inc.com;se.cuhk.edu.hk", + "email": "se.cuhk.edu.hk;alibaba-inc.com;alibaba-inc.com;mail.nankai.edu.cn;alibaba-inc.com;alibaba-inc.com;se.cuhk.edu.hk", + "github": "https://github.com/yfyuan01/MQR", + 
"project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;2;1;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Alibaba Group;Nankai University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.alibaba.com;http://www.nankai.edu.cn", + "aff_unique_abbr": "CUHK;Alibaba;NKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.161", + "title": "Measurement Extraction with Natural Language Processing: A Review", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Quantitative data is important in many domains. Information extraction methods draw structured data from documents. However, the extraction of quantities and their contexts has received little attention in the history of information extraction. In this review, an overview of prior work on measurement extraction is presented. We describe different approaches to measurement extraction and outline the challenges posed by this task. The review concludes with an outline of potential future research. Research strains in measurement extraction tend to be isolated and lack a common terminology. 
Improvements in numerical reasoning, more extensive datasets, and the consideration of wider contexts may lead to significant improvements in measurement extraction.", + "author": "Jan G\u00f6pfert; Patrick Kuckertz; Jann Weinand; Leander Kotzur; Detlef Stolten", + "authorids": "/j/jan-gopfert/; /p/patrick-kuckertz/; /j/jann-weinand/; /l/leander-kotzur/; /d/detlef-stolten/", + "bibtex": "@inproceedings{gopfert-etal-2022-measurement,\n title = \"Measurement Extraction with Natural Language Processing: A Review\",\n author = {G{\\\"o}pfert, Jan and\n Kuckertz, Patrick and\n Weinand, Jann and\n Kotzur, Leander and\n Stolten, Detlef},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.161/\",\n doi = \"10.18653/v1/2022.findings-emnlp.161\",\n pages = \"2191--2215\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.161.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.161/", + "pdf_size": 1181192, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9683852357978928057&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Institute of Energy and Climate Research, Techno-economic Systems Analysis (IEK-3), Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany+Chair for Fuel Cells, RWTH Aachen University, c/o IEK-3, Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany; Institute of Energy and Climate Research, Techno-economic Systems Analysis (IEK-3), Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany; Institute of Energy and Climate Research, Techno-economic Systems Analysis (IEK-3), Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany; Institute of Energy and Climate Research, 
Techno-economic Systems Analysis (IEK-3), Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany; Institute of Energy and Climate Research, Techno-economic Systems Analysis (IEK-3), Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany+Chair for Fuel Cells, RWTH Aachen University, c/o IEK-3, Forschungszentrum J\u00fclich, 52425 J\u00fclich, Germany", + "aff_domain": "fz-juelich.de; ; ; ; ", + "email": "fz-juelich.de; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0;0;0+1", + "aff_unique_norm": "Forschungszentrum J\u00fclich;RWTH Aachen University", + "aff_unique_dep": "Institute of Energy and Climate Research, Techno-economic Systems Analysis (IEK-3);Chair for Fuel Cells", + "aff_unique_url": "https://www.fz-juelich.de;https://www.rwth-aachen.de", + "aff_unique_abbr": "FZJ;RWTH", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "J\u00fclich;", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.173", + "title": "Measuring Context-Word Biases in Lexical Semantic Datasets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "State-of-the-art pretrained contextualized models (PCM) eg. BERT use tasks such as WiC and WSD to evaluate their word-in-context representations. This inherently assumes that performance in these tasks reflect how well a model represents the coupled word and context semantics. We question this assumption by presenting the first quantitative analysis on the context-word interaction being tested in major contextual lexical semantic tasks. To achieve this, we run probing baselines on masked input, and propose measures to calculate and visualize the degree of context or word biases in existing datasets. The analysis was performed on both models and humans. 
Our findings demonstrate that models are usually not being tested for word-in-context semantics in the same way as humans are in these tasks, which helps us better understand the model-human gap. Specifically, to PCMs, most existing datasets fall into the extreme ends (the retrieval-based tasks exhibit strong target word bias while WiC-style tasks and WSD show strong context bias); In comparison, humans are less biased and achieve much better performance when both word and context are available than with masked input. We recommend our framework for understanding and controlling these biases for model interpretation and future task design.", + "author": "Qianchu Liu; Diana McCarthy; Anna Korhonen", + "authorids": "/q/qianchu-liu/; /d/diana-mccarthy/; /a/anna-korhonen/", + "bibtex": "@inproceedings{liu-etal-2022-measuring,\n title = \"Measuring Context-Word Biases in Lexical Semantic Datasets\",\n author = \"Liu, Qianchu and\n McCarthy, Diana and\n Korhonen, Anna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.173/\",\n doi = \"10.18653/v1/2022.emnlp-main.173\",\n pages = \"2699--2713\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.173.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.173/", + "pdf_size": 1317093, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5519951730236857067&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Language Technology Lab, TAL, University of Cambridge, UK; Language Technology Lab, TAL, University of Cambridge, UK; Language Technology Lab, TAL, University of Cambridge, UK", + "aff_domain": "cam.ac.uk;dianamccarthy.co.uk;cam.ac.uk", + "email": 
"cam.ac.uk;dianamccarthy.co.uk;cam.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Language Technology Lab, TAL", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.66", + "title": "Measuring and Improving Semantic Diversity of Dialogue Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Response diversity has become an important criterion for evaluating the quality of open-domain dialogue generation models. However, current evaluation metrics for response diversity often fail to capture the semantic diversity of generated responses, as they mainly consider lexical aspects of the generated responses. In this paper, we introduce a new automatic evaluation metric to measure the semantic diversity of generated responses. Through human evaluation, we demonstrate that our proposed metric captures human judgments on response diversity better than existing lexical-level diversity metrics. Furthermore, motivated by analyzing an existing dialogue dataset, we propose a simple yet effective learning method that improves the semantic diversity of generated responses. 
Our learning method weights training samples based on the semantic distribution of the training set.We show that our learning method improves response diversity and coherency better than other baseline methods through automatic and human evaluation.", + "author": "Seungju Han; Beomsu Kim; Buru Chang", + "authorids": "/s/seungju-han/; /b/beomsu-kim/; /b/buru-chang/", + "bibtex": "@inproceedings{han-etal-2022-measuring,\n title = \"Measuring and Improving Semantic Diversity of Dialogue Generation\",\n author = \"Han, Seungju and\n Kim, Beomsu and\n Chang, Buru\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.66/\",\n doi = \"10.18653/v1/2022.findings-emnlp.66\",\n pages = \"934--950\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.66.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.66/", + "pdf_size": 1739332, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4340971394870729730&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Hyperconnect; Hyperconnect; Hyperconnect", + "aff_domain": "snu.ac.kr;hpcnt.com;hpcnt.com", + "email": "snu.ac.kr;hpcnt.com;hpcnt.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Hyperconnect", + "aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.595", + "title": "Measuring the Mixing of Contextual Information in the Transformer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The Transformer 
architecture aggregates input information through the self-attention mechanism, but there is no clear understanding of how this information is mixed across the entire model. Additionally, recent works have demonstrated that attention weights alone are not enough to describe the flow of information. In this paper, we consider the whole attention block \u2013multi-head attention, residual connection, and layer normalization\u2013 and define a metric to measure token-to-token interactions within each layer. Then, we aggregate layer-wise interpretations to provide input attribution scores for model predictions. Experimentally, we show that our method, ALTI (Aggregation of Layer-wise Token-to-token Interactions), provides more faithful explanations and increased robustness than gradient-based methods.", + "author": "Javier Ferrando; Gerard I. G\u00e1llego; Marta R. Costa-juss\u00e0", + "authorids": "/j/javier-ferrando/; /g/gerard-i-gallego/; /m/marta-r-costa-jussa/", + "bibtex": "@inproceedings{ferrando-etal-2022-measuring,\n title = \"Measuring the Mixing of Contextual Information in the Transformer\",\n author = \"Ferrando, Javier and\n G{\\'a}llego, Gerard I. 
and\n Costa-juss{\\`a}, Marta R.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.595/\",\n doi = \"10.18653/v1/2022.emnlp-main.595\",\n pages = \"8698--8714\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.595.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.595/", + "pdf_size": 3297879, + "gs_citation": 53, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18026123775544093107&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": "TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; TALP Research Center, Universitat Polit\u00e8cnica de Catalunya", + "aff_domain": "upc.edu;upc.edu;upc.edu", + "email": "upc.edu;upc.edu;upc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Universitat Polit\u00e8cnica de Catalunya", + "aff_unique_dep": "TALP Research Center", + "aff_unique_url": "https://www.upc.edu", + "aff_unique_abbr": "UPC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Spain" + }, + { + "id": "2022.emnlp-main.256", + "title": "MedCLIP: Contrastive Learning from Unpaired Medical Images and Text", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing vision-text contrastive learning like CLIP aims to match the paired image and caption embeddings while pushing others apart, which improves representation transferability and supports zero-shot prediction. 
However, medical image-text datasets are orders of magnitude below the general images and captions from the internet. Moreover, previous methods encounter many false negatives, i.e., images and reports from separate patients probably carry the same semantics but are wrongly treated as negatives. In this paper, we decouple images and texts for multimodal contrastive learning, thus scaling the usable training data in a combinatorial magnitude with low cost. We also propose to replace the InfoNCE loss with semantic matching loss based on medical knowledge to eliminate false negatives in contrastive learning. We prove that MedCLIP is a simple yet effective framework: it outperforms state-of-the-art methods on zero-shot prediction, supervised classification, and image-text retrieval. Surprisingly, we observe that with only 20K pre-training data, MedCLIP wins over the state-of-the-art method (using 200K data). The code is available at https://github.com/RyanWangZf/MedCLIP.", + "author": "Zifeng Wang; Zhenbang Wu; Dinesh Agarwal; Jimeng Sun", + "authorids": "/z/zifeng-wang/; /z/zhenbang-wu/; /d/dinesh-agarwal/; /j/jimeng-sun/", + "bibtex": "@inproceedings{wang-etal-2022-medclip,\n title = \"{M}ed{CLIP}: Contrastive Learning from Unpaired Medical Images and Text\",\n author = \"Wang, Zifeng and\n Wu, Zhenbang and\n Agarwal, Dinesh and\n Sun, Jimeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.256/\",\n doi = \"10.18653/v1/2022.emnlp-main.256\",\n pages = \"3876--3887\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.256.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.256/", + "pdf_size": 1485184, + "gs_citation": 478, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=12749622247428621639&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Department of Computer Science, University of Illinois Urbana-Champaign; Department of Computer Science, University of Illinois Urbana-Champaign; Adobe + Department of Computer Science, University of Illinois Urbana-Champaign; Department of Computer Science, University of Illinois Urbana-Champaign + Carle Illinois College of Medicine, University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;adobe.com;illinois.edu", + "email": "illinois.edu;illinois.edu;adobe.com;illinois.edu", + "github": "https://github.com/RyanWangZf/MedCLIP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+0;0+0", + "aff_unique_norm": "University of Illinois Urbana-Champaign;Adobe Inc.", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://illinois.edu;https://www.adobe.com", + "aff_unique_abbr": "UIUC;Adobe", + "aff_campus_unique_index": "0;0;0;0+0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.805", + "title": "MedJEx: A Medical Jargon Extraction Model with Wiki\u2019s Hyperlink Span and Contextualized Masked Language Model Score", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper proposes a new natural language processing (NLP) application for identifying medical jargon terms potentially difficult for patients to comprehend from electronic health record (EHR) notes. We first present a novel and publicly available dataset with expert-annotated medical jargon terms from 18K+ EHR note sentences (MedJ). Then, we introduce a novel medical jargon extraction (MedJEx) model which has been shown to outperform existing state-of-the-art NLP models. 
First, MedJEx improved the overall performance when it was trained on an auxiliary Wikipedia hyperlink span dataset, where hyperlink spans provide additional Wikipedia articles to explain the spans (or terms), and then fine-tuned on the annotated MedJ data. Secondly, we found that a contextualized masked language model score was beneficial for detecting domain-specific unfamiliar jargon terms. Moreover, our results show that training on the auxiliary Wikipedia hyperlink span datasets improved six out of eight biomedical named entity recognition benchmark datasets. MedJEx is publicly available.", + "author": "Sunjae Kwon; Zonghai Yao; Harmon Jordan; David Levy; Brian Corner; Hong Yu", + "authorids": "/s/sunjae-kwon/; /z/zonghai-yao/; /h/harmon-jordan/; /d/david-levy/; /b/brian-corner/; /h/hong-yu/", + "bibtex": "@inproceedings{kwon-etal-2022-medjex,\n title = \"{M}ed{JE}x: A Medical Jargon Extraction Model with {W}iki`s Hyperlink Span and Contextualized Masked Language Model Score\",\n author = \"Kwon, Sunjae and\n Yao, Zonghai and\n Jordan, Harmon and\n Levy, David and\n Corner, Brian and\n Yu, Hong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.805/\",\n doi = \"10.18653/v1/2022.emnlp-main.805\",\n pages = \"11733--11751\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.805.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.805/", + "pdf_size": 710994, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11967717858696418696&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 9, + "aff": "UMass Amherst; Health Research Consultant; UMass Lowell; UMass Medical School + U.S. 
Department of Veterans Affairs; UMass Medical School; UMass Lowell + U.S. Department of Veterans Affairs", + "aff_domain": "umass.edu;umass.edu;gmail.com;uml.edu;umassmed.edu;uml.edu", + "email": "umass.edu;umass.edu;gmail.com;uml.edu;umassmed.edu;uml.edu", + "github": "https://github.com/MozziTasteBitter/MedJEx", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3+4;3;2+4", + "aff_unique_norm": "University of Massachusetts Amherst;Health Research Consultant;University of Massachusetts Lowell;University of Massachusetts Medical School;U.S. Department of Veterans Affairs", + "aff_unique_dep": ";;;Medical School;", + "aff_unique_url": "https://www.umass.edu;;https://www.uml.edu;https://www.umassmed.edu;https://www.va.gov", + "aff_unique_abbr": "UMass Amherst;;UMass Lowell;UMass Med;VA", + "aff_campus_unique_index": "0;2;3;3;2", + "aff_campus_unique": "Amherst;;Lowell;Worcester", + "aff_country_unique_index": "0;0;0+0;0;0+0", + "aff_country_unique": "United States;" + }, + { + "id": "2022.findings-emnlp.349", + "title": "MedicalSum: A Guided Clinical Abstractive Summarization Model for Generating Medical Reports from Patient-Doctor Conversations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We introduce MedicalSum, a transformer-based sequence-to-sequence architecture for summarizing medical conversations by integrating medical domain knowledge from the Unified Medical Language System (UMLS). The novel knowledge augmentation is performed in three ways: (i) introducing a guidance signal that consists of the medical words in the input sequence, (ii) leveraging semantic type knowledge in UMLS to create clinically meaningful input embeddings, and (iii) making use of a novel weighted loss function that provides a stronger incentive for the model to correctly predict words with a medical meaning. 
By applying these three strategies, MedicalSum takes clinical knowledge into consideration during the summarization process and achieves state-of-the-art ROUGE score improvements of 0.8-2.1 points (including 6.2% ROUGE-1 error reduction in the PE section) when producing medical summaries of patient-doctor conversations.", + "author": "George Michalopoulos; Kyle Williams; Gagandeep Singh; Thomas Lin", + "authorids": "/g/george-michalopoulos/; /k/kyle-williams/; /g/gagandeep-singh/; /t/thomas-lin/", + "bibtex": "@inproceedings{michalopoulos-etal-2022-medicalsum,\n title = \"{M}edical{S}um: A Guided Clinical Abstractive Summarization Model for Generating Medical Reports from Patient-Doctor Conversations\",\n author = \"Michalopoulos, George and\n Williams, Kyle and\n Singh, Gagandeep and\n Lin, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.349/\",\n doi = \"10.18653/v1/2022.findings-emnlp.349\",\n pages = \"4741--4749\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.349.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.349/", + "pdf_size": 256886, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15388382426134158385&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.183", + "title": "Memory-assisted prompt editing to improve GPT-3 after deployment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large LMs such as GPT-3 are powerful, but can commit mistakes that are obvious to humans. 
For example, GPT-3 would mistakenly interpret \u201cWhat word is similar to good?\u201d to mean a homophone, while the user intended a synonym. Our goal is to effectively correct such errors via user interactions with the system but without retraining, which will be prohibitively costly. We pair GPT-3 with a growing memory of recorded cases where the model misunderstood the user\u2019s intents, along with user feedback for clarification. Such a memory allows our system to produce enhanced prompts for any new query based on the user feedback for error correction on similar cases in the past. On four tasks (two lexical tasks, two advanced ethical reasoning tasks), we show how a (simulated) user can interactively teach a deployed GPT-3, substantially increasing its accuracy over the queries with different kinds of misunderstandings by the GPT-3. Our approach is a step towards the low-cost utility enhancement for very large pre-trained LMs.", + "author": "Aman Madaan; Niket Tandon; Peter Clark; Yiming Yang", + "authorids": "/a/aman-madaan/; /n/niket-tandon/; /p/peter-clark/; /y/yiming-yang/", + "bibtex": "@inproceedings{madaan-etal-2022-memory,\n title = \"Memory-assisted prompt editing to improve {GPT}-3 after deployment\",\n author = \"Madaan, Aman and\n Tandon, Niket and\n Clark, Peter and\n Yang, Yiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.183/\",\n doi = \"10.18653/v1/2022.emnlp-main.183\",\n pages = \"2833--2861\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.183.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.183/", + "pdf_size": 2574844, + "gs_citation": 136, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=16500204067779963998&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA + Allen Institute for Artificial Intelligence, Seattle, WA, USA; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA + Allen Institute for Artificial Intelligence, Seattle, WA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, PA, USA", + "aff_domain": "cs.cmu.edu;allenai.org;allenai.org;cs.cmu.edu", + "email": "cs.cmu.edu;allenai.org;allenai.org;cs.cmu.edu", + "github": "", + "project": "https://www.memprompt.com/", + "author_num": 4, + "aff_unique_index": "0+1;0+1;1;0", + "aff_unique_norm": "Carnegie Mellon University;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Language Technologies Institute;", + "aff_unique_url": "https://www.cmu.edu;https://allenai.org", + "aff_unique_abbr": "CMU;AI2", + "aff_campus_unique_index": "0+1;0+1;1;0", + "aff_campus_unique": "Pittsburgh;Seattle", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.661", + "title": "Meta-Learning Fast Weight Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dynamic evaluation of language models (LMs) adapts model parameters at test time using gradient information from previous tokens and substantially improves LM performance. However, it requires over 3x more compute than standard inference. We present Fast Weight Layers (FWLs), a neural component that provides the benefits of dynamic evaluation much more efficiently by expressing gradient updates as linear attention. A key improvement over dynamic evaluation is that FWLs can also be applied at training time, so the model learns to make good use of gradient updates. 
FWLs can easily be added on top of existing transformer models, require relatively little extra compute or memory to run, and significantly improve language modeling perplexity.", + "author": "Kevin Clark; Kelvin Guu; Ming-Wei Chang; Panupong Pasupat; Geoffrey Hinton; Mohammad Norouzi", + "authorids": "/k/kevin-clark/; /k/kelvin-guu/; /m/ming-wei-chang/; /p/panupong-pasupat/; /g/geoffrey-hinton/; /m/mohammad-norouzi/", + "bibtex": "@inproceedings{clark-etal-2022-meta,\n title = \"Meta-Learning Fast Weight Language Models\",\n author = \"Clark, Kevin and\n Guu, Kelvin and\n Chang, Ming-Wei and\n Pasupat, Panupong and\n Hinton, Geoffrey and\n Norouzi, Mohammad\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.661/\",\n doi = \"10.18653/v1/2022.emnlp-main.661\",\n pages = \"9751--9757\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.661.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.661/", + "pdf_size": 304323, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4610525434083018307&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 3, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0;0", 
+ "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.34", + "title": "Meta-learning Pathologies from Radiology Reports using Variance Aware Prototypical Networks", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Large pretrained Transformer-based language models like BERT and GPT have changed the landscape of Natural Language Processing (NLP). However, fine tuning such models still requires a large number of training examples for each target task, thus annotating multiple datasets and training these models on various downstream tasks becomes time consuming and expensive. In this work, we propose a simple extension of the Prototypical Networks for few-shot text classification. Our main idea is to replace the class prototypes by Gaussians and introduce a regularization term that encourages the examples to be clustered near the appropriate class centroids. Experimental results show that our method outperforms various strong baselines on 13 public and 4 internal datasets. 
Furthermore, we use the class distributions as a tool for detecting potential out-of-distribution (OOD) data points during deployment.", + "author": "Arijit Sehanobish; Kawshik Kannan; Nabila Abraham; Anasuya Das; Benjamin Odry", + "authorids": "/a/arijit-sehanobish/; /k/kawshik-kannan/; /n/nabila-abraham/; /a/anasuya-das/; /b/benjamin-odry/", + "bibtex": "@inproceedings{sehanobish-etal-2022-meta,\n title = \"Meta-learning Pathologies from Radiology Reports using Variance Aware Prototypical Networks\",\n author = \"Sehanobish, Arijit and\n Kannan, Kawshik and\n Abraham, Nabila and\n Das, Anasuya and\n Odry, Benjamin\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.34/\",\n doi = \"10.18653/v1/2022.emnlp-industry.34\",\n pages = \"332--347\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.34.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.34/", + "pdf_size": 2687773, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15353287371418719181&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.76", + "title": "MetaASSIST: Robust Dialogue State Tracking with Meta Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing dialogue datasets contain lots of noise in their state annotations. Such noise can hurt model training and ultimately lead to poor generalization performance. A general framework named ASSIST has recently been proposed to train robust dialogue state tracking (DST) models. 
It introduces an auxiliary model to generate pseudo labels for the noisy training set. These pseudo labels are combined with vanilla labels by a common fixed weighting parameter to train the primary DST model. Notwithstanding the improvements of ASSIST on DST, tuning the weighting parameter is challenging. Moreover, a single parameter shared by all slots and all instances may be suboptimal. To overcome these limitations, we propose a meta learning-based framework MetaASSIST to adaptively learn the weighting parameter. Specifically, we propose three schemes with varying degrees of flexibility, ranging from slot-wise to both slot-wise and instance-wise, to convert the weighting parameter into learnable functions. These functions are trained in a meta-learning manner by taking the validation set as meta data. Experimental results demonstrate that all three schemes can achieve competitive performance. Most impressively, we achieve a state-of-the-art joint goal accuracy of 80.10% on MultiWOZ 2.4.", + "author": "Fanghua Ye; Xi Wang; Jie Huang; Shenghui Li; Samuel Stern; Emine Yilmaz", + "authorids": "/f/fanghua-ye/; /x/xi-wang/; /j/jie-huang/; /s/shenghui-li/; /s/samuel-stern/; /e/emine-yilmaz/", + "bibtex": "@inproceedings{ye-etal-2022-metaassist,\n title = \"{M}eta{ASSIST}: Robust Dialogue State Tracking with Meta Learning\",\n author = \"Ye, Fanghua and\n Wang, Xi and\n Huang, Jie and\n Li, Shenghui and\n Stern, Samuel and\n Yilmaz, Emine\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.76/\",\n doi = \"10.18653/v1/2022.emnlp-main.76\",\n pages = \"1157--1169\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.76.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.76/", + "pdf_size": 1626192, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15513352698556656545&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.341", + "title": "MetaFill: Text Infilling for Meta-Path Generation on Heterogeneous Information Networks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Heterogeneous information network (HIN) is essential to study complicated networks containing multiple edge types and node types. Meta-path, a sequence of node types and edge types, is the core technique to embed HINs. Since manually curating meta-paths is time-consuming, there is a pressing need to develop automated meta-path generation approaches. Existing meta-path generation approaches cannot fully exploit the rich textual information in HINs, such as node names and edge type names. To address this problem, we propose MetaFill, a text-infilling-based approach for meta-path generation. The key idea of MetaFill is to formulate meta-path identification problem as a word sequence infilling problem, which can be advanced by pretrained language models (PLMs). We observed the superior performance of MetaFill against existing meta-path generation methods and graph embedding methods that do not leverage meta-paths in both link prediction and node classification on two real-world HIN datasets. We further demonstrated how MetaFill can accurately classify edges in the zero-shot setting, where existing approaches cannot generate any meta-paths. 
MetaFill exploits PLMs to generate meta-paths for graph embedding, opening up new avenues for language model applications in graph analysis.", + "author": "Zequn Liu; Kefei Duan; Junwei Yang; Hanwen Xu; Ming Zhang; Sheng Wang", + "authorids": "/z/zequn-liu/; /k/kefei-duan/; /j/junwei-yang/; /h/hanwen-xu/; /m/ming-zhang/; /s/sheng-wang/", + "bibtex": "@inproceedings{liu-etal-2022-metafill,\n title = \"{M}eta{F}ill: Text Infilling for Meta-Path Generation on Heterogeneous Information Networks\",\n author = \"Liu, Zequn and\n Duan, Kefei and\n Yang, Junwei and\n Xu, Hanwen and\n Zhang, Ming and\n Wang, Sheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.341/\",\n doi = \"10.18653/v1/2022.emnlp-main.341\",\n pages = \"5110--5122\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.341.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.341/", + "pdf_size": 4933139, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11876666947668437575&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science, Peking University, Beijing, China; School of EECS, Peking University, Beijing, China; School of Computer Science, Peking University, Beijing, China; Paul G. Allen School of Computer Science and Engineering, University of Washington, Seattle, WA; School of Computer Science, Peking University, Beijing, China; Paul G. 
Allen School of Computer Science and Engineering, University of Washington, Seattle, WA", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;cs.washington.edu;pku.edu.cn;cs.washington.edu", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;cs.washington.edu;pku.edu.cn;cs.washington.edu", + "github": "https://github.com/zequnl/MetaFill", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;1", + "aff_unique_norm": "Peking University;University of Washington", + "aff_unique_dep": "School of Computer Science;Paul G. Allen School of Computer Science and Engineering", + "aff_unique_url": "http://www.pku.edu.cn;https://www.washington.edu", + "aff_unique_abbr": "PKU;UW", + "aff_campus_unique_index": "0;0;0;1;0;1", + "aff_campus_unique": "Beijing;Seattle", + "aff_country_unique_index": "0;0;0;1;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.310", + "title": "MetaLogic: Logical Reasoning Explanations with Fine-Grained Structure", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we propose a comprehensive benchmark to investigate models\u2019 logical reasoning capabilities in complex real-life scenarios. Current explanation datasets often employ synthetic data with simple reasoning structures. Therefore, it cannot express more complex reasoning processes, such as the rebuttal to a reasoning step and the degree of certainty of the evidence. To this end, we propose a comprehensive logical reasoning explanation form. Based on the multi-hop chain of reasoning, the explanation form includes three main components: (1) The condition of rebuttal that the reasoning node can be challenged; (2) Logical formulae that uncover the internal texture of reasoning nodes; (3) Reasoning strength indicated by degrees of certainty. 
The fine-grained structure conforms to the real logical reasoning scenario, better fitting the human cognitive process but, simultaneously, is more challenging for the current models. We evaluate the current best models\u2019 performance on this new explanation form. The experimental results show that generating reasoning graphs remains a challenging task for current models, even with the help of giant pre-trained language models.", + "author": "Yinya Huang; Hongming Zhang; Ruixin Hong; Xiaodan Liang; Changshui Zhang; Dong Yu", + "authorids": "/y/yinya-huang/; /h/hongming-zhang/; /r/ruixin-hong/; /x/xiaodan-liang/; /c/changshui-zhang/; /d/dong-yu/", + "bibtex": "@inproceedings{huang-etal-2022-metalogic,\n title = \"{M}eta{L}ogic: Logical Reasoning Explanations with Fine-Grained Structure\",\n author = \"Huang, Yinya and\n Zhang, Hongming and\n Hong, Ruixin and\n Liang, Xiaodan and\n Zhang, Changshui and\n Yu, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.310/\",\n doi = \"10.18653/v1/2022.emnlp-main.310\",\n pages = \"4698--4724\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.310.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.310/", + "pdf_size": 4147518, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8156715988182980113&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Shenzhen Campus of Sun Yat-sen University+Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tsinghua University; Shenzhen Campus of Sun Yat-sen University+Pengcheng Laboratory; Tsinghua University; Tencent AI Lab, Seattle", + "aff_domain": 
"hotmail.com;global.tencent.com;global.tencent.com;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn;gmail.com", + "email": "hotmail.com;global.tencent.com;global.tencent.com;mails.tsinghua.edu.cn;mail.tsinghua.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;2;0+3;2;1", + "aff_unique_norm": "Sun Yat-sen University;Tencent;Tsinghua University;Pengcheng Laboratory", + "aff_unique_dep": ";AI Lab;;", + "aff_unique_url": "http://www.sysu.edu.cn/;https://ai.tencent.com;https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "SYSU;Tencent AI Lab;THU;", + "aff_campus_unique_index": "0+1;1;0;1", + "aff_campus_unique": "Shenzhen;Seattle;", + "aff_country_unique_index": "0+1;1;0;0+0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.487", + "title": "MetaTKG: Learning Evolutionary Meta-Knowledge for Temporal Knowledge Graph Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reasoning over Temporal Knowledge Graphs (TKGs) aims to predict future facts based on given history. One of the key challenges for prediction is to learn the evolution of facts. Most existing works focus on exploring evolutionary information in history to obtain effective temporal embeddings for entities and relations, but they ignore the variation in evolution patterns of facts, which makes them struggle to adapt to future data with different evolution patterns. Moreover, new entities continue to emerge along with the evolution of facts over time. Since existing models highly rely on historical information to learn embeddings for entities, they perform poorly on such entities with little historical information. To tackle these issues, we propose a novel Temporal Meta-learning framework for TKG reasoning, MetaTKG for brevity. 
Specifically, our method regards TKG prediction as many temporal meta-tasks, and utilizes the designed Temporal Meta-learner to learn evolutionary meta-knowledge from these meta-tasks. The proposed method aims to guide the backbones to learn to adapt quickly to future data and deal with entities with little historical information by the learned meta-knowledge. Specially, in temporal meta-learner, we design a Gating Integration module to adaptively establish temporal correlations between meta-tasks. Extensive experiments on four widely-used datasets and three backbones demonstrate that our method can greatly improve the performance.", + "author": "Yuwei Xia; Mengqi Zhang; Qiang Liu; Shu Wu; Xiao-Yu Zhang", + "authorids": "/y/yuwei-xia/; /m/mengqi-zhang/; /q/qiang-liu/; /s/shu-wu/; /x/xiao-yu-zhang/", + "bibtex": "https://aclanthology.org/2022.emnlp-main.487.bib", + "pdf": "https://aclanthology.org/2022.emnlp-main.487.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.487/", + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18430793193076984390&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.53", + "title": "Metric-guided Distillation: Distilling Knowledge from the Metric to Ranker and Retriever for Generative Commonsense Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Commonsense generation aims to generate a realistic sentence describing a daily scene under the given concepts, which is very challenging, since it requires models to have relational reasoning and compositional generalization capabilities. Previous work focuses on retrieving prototype sentences for the provided concepts to assist generation. They first use a sparse retriever to retrieve candidate sentences, then re-rank the candidates with a ranker. 
However, the candidates returned by their ranker may not be the most relevant sentences, since the ranker treats all candidates equally without considering their relevance to the reference sentences of the given concepts. Another problem is that re-ranking is very expensive, but only using retrievers will seriously degrade the performance of their generation models. To solve these problems, we propose the metric distillation rule to distill knowledge from the metric (e.g., BLEU) to the ranker. We further transfer the critical knowledge summarized by the distilled ranker to the retriever. In this way, the relevance scores of candidate sentences predicted by the ranker and retriever will be more consistent with their quality measured by the metric. Experimental results on the CommonGen benchmark verify the effectiveness of our proposed method: (1) Our generation model with the distilled ranker achieves a new state-of-the-art result. (2) Our generation model with the distilled retriever even surpasses the previous SOTA.", + "author": "Xingwei He; Yeyun Gong; A-Long Jin; Weizhen Qi; Hang Zhang; Jian Jiao; Bartuer Zhou; Biao Cheng; Sm Yiu; Nan Duan", + "authorids": "/x/xingwei-he/; /y/yeyun-gong/; /a/a-long-jin/; /w/weizhen-qi/; /h/hang-zhang/; /j/jian-jiao/; /b/bartuer-zhou/; /b/biao-cheng/; /s/sm-yiu/; /n/nan-duan/", + "bibtex": "@inproceedings{he-etal-2022-metric,\n title = \"Metric-guided Distillation: Distilling Knowledge from the Metric to Ranker and Retriever for Generative Commonsense Reasoning\",\n author = \"He, Xingwei and\n Gong, Yeyun and\n Jin, A-Long and\n Qi, Weizhen and\n Zhang, Hang and\n Jiao, Jian and\n Zhou, Bartuer and\n Cheng, Biao and\n Yiu, Sm and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = 
\"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.53/\",\n doi = \"10.18653/v1/2022.emnlp-main.53\",\n pages = \"839--852\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.53.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.53/", + "pdf_size": 715231, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16040079281364912899&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "The University of Hong Kong; Microsoft Research Asia; University of Science and Technology of China; Microsoft; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; Microsoft Research Asia; The University of Hong Kong; Microsoft Research Asia", + "aff_domain": "gmail.com;microsoft.com;eee.hku.hk;mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;cs.hku.hk;microsoft.com", + "email": "gmail.com;microsoft.com;eee.hku.hk;mail.ustc.edu.cn;microsoft.com;microsoft.com;microsoft.com;microsoft.com;cs.hku.hk;microsoft.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;1;2;3;1;1;1;1;0;1", + "aff_unique_norm": "The University of Hong Kong;Microsoft Research;University of Science and Technology of China;Microsoft Corporation", + "aff_unique_dep": ";Research;;", + "aff_unique_url": "https://www.hku.hk;https://www.microsoft.com/en-us/research/group/asia;http://www.ustc.edu.cn;https://www.microsoft.com", + "aff_unique_abbr": "HKU;MSR Asia;USTC;Microsoft", + "aff_campus_unique_index": "1;1;1;1;1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;1;0;0;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.94", + "title": "MiST: a Large-Scale Annotated Resource and Neural Models for Functions of Modal Verbs in English Scientific Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Modal verbs (e.g., can, should or must) occur highly 
frequently in scientific articles. Decoding their function is not straightforward: they are often used for hedging, but they may also denote abilities and restrictions. Understanding their meaning is important for accurate information extraction from scientific text.To foster research on the usage of modals in this genre, we introduce the MIST (Modals In Scientific Text) dataset, which contains 3737 modal instances in five scientific domains annotated for their semantic, pragmatic, or rhetorical function. We systematically evaluate a set of competitive neural architectures on MIST. Transfer experiments reveal that leveraging non-scientific data is of limited benefit for modeling the distinctions in MIST. Our corpus analysis provides evidence that scientific communities differ in their usage of modal verbs, yet, classifiers trained on scientific data generalize to some extent to unseen scientific domains.", + "author": "Sophie Henning; Nicole Macher; Stefan Gr\u00fcnewald; Annemarie Friedrich", + "authorids": "/s/sophie-henning/; /n/nicole-macher/; /s/stefan-grunewald/; /a/annemarie-friedrich/", + "bibtex": "@inproceedings{henning-etal-2022-mist,\n title = \"{M}i{ST}: a Large-Scale Annotated Resource and Neural Models for Functions of Modal Verbs in {E}nglish Scientific Text\",\n author = {Henning, Sophie and\n Macher, Nicole and\n Gr{\\\"u}newald, Stefan and\n Friedrich, Annemarie},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.94/\",\n doi = \"10.18653/v1/2022.findings-emnlp.94\",\n pages = \"1305--1324\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.94.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.94/", + "pdf_size": 441051, 
+ "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18442966282416518776&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Bosch Center for Artificial Intelligence, Renningen, Germany+Center for Information and Language Processing, LMU Munich, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany+Institut f\u00fcr Maschinelle Sprachverarbeitung, University of Stuttgart, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany", + "aff_domain": "de.bosch.com;gmail.com;de.bosch.com;de.bosch.com", + "email": "de.bosch.com;gmail.com;de.bosch.com;de.bosch.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;0+2;0", + "aff_unique_norm": "Bosch Center for Artificial Intelligence;LMU Munich;University of Stuttgart", + "aff_unique_dep": "Artificial Intelligence;Center for Information and Language Processing;Institut f\u00fcr Maschinelle Sprachverarbeitung", + "aff_unique_url": "https://www.bosch-ai.com;https://www.lmu.de;https://www.uni-stuttgart.de", + "aff_unique_abbr": "BCAI;LMU;", + "aff_campus_unique_index": "0+1;0;0;0", + "aff_campus_unique": "Renningen;Munich;", + "aff_country_unique_index": "0+0;0;0+0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.311", + "title": "Mind Your Bias: A Critical Review of Bias Detection Methods for Contextual Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The awareness and mitigation of biases are of fundamental importance for the fair and transparent use of contextual language models, yet they crucially depend on the accurate detection of biases as a precursor. Consequently, numerous bias detection methods have been proposed, which vary in their approach, the considered type of bias, and the data used for evaluation. 
However, while most detection methods are derived from the word embedding association test for static word embeddings, the reported results are heterogeneous, inconsistent, and ultimately inconclusive. To address this issue, we conduct a rigorous analysis and comparison of bias detection methods for contextual language models. Our results show that minor design and implementation decisions (or errors) have a substantial and often significant impact on the derived bias scores. Overall, we find the state of the field to be both worse than previously acknowledged due to systematic and propagated errors in implementations, yet better than anticipated since divergent results in the literature homogenize after accounting for implementation errors. Based on our findings, we conclude with a discussion of paths towards more robust and consistent bias detection methods.", + "author": "Silke Husse; Andreas Spitz", + "authorids": "/s/silke-husse/; /a/andreas-spitz/", + "bibtex": "@inproceedings{husse-spitz-2022-mind,\n title = \"Mind Your Bias: A Critical Review of Bias Detection Methods for Contextual Language Models\",\n author = \"Husse, Silke and\n Spitz, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.311/\",\n doi = \"10.18653/v1/2022.findings-emnlp.311\",\n pages = \"4212--4234\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.311.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.311/", + "pdf_size": 1693649, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4346591862355760056&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "University of Konstanz, Germany; University of 
Konstanz, Germany", + "aff_domain": "uni.kn;uni.kn", + "email": "uni.kn;uni.kn", + "github": "https://github.com/SilkeHusse/Re-Evaluating-Bias", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Konstanz", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uni-konstanz.de", + "aff_unique_abbr": "Uni Konstanz", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.397", + "title": "Missing Counter-Evidence Renders NLP Fact-Checking Unrealistic for Misinformation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Misinformation emerges in times of uncertainty when credible information is limited. This is challenging for NLP-based fact-checking as it relies on counter-evidence, which may not yet be available. Despite increasing interest in automatic fact-checking, it is still unclear if automated approaches can realistically refute harmful real-world misinformation. Here, we contrast and compare NLP fact-checking with how professional fact-checkers combat misinformation in the absence of counter-evidence. In our analysis, we show that, by design, existing NLP task definitions for fact-checking cannot refute misinformation as professional fact-checkers do for the majority of claims. We then define two requirements that the evidence in datasets must fulfill for realistic fact-checking: It must be (1) sufficient to refute the claim and (2) not leaked from existing fact-checking articles. We survey existing fact-checking datasets and find that all of them fail to satisfy both criteria. Finally, we perform experiments to demonstrate that models trained on a large-scale fact-checking dataset rely on leaked evidence, which makes them unsuitable in real-world scenarios. 
Taken together, we show that current NLP fact-checking cannot realistically combat real-world misinformation because it depends on unrealistic assumptions about counter-evidence in the data.", + "author": "Max Glockner; Yufang Hou; Iryna Gurevych", + "authorids": "/m/max-glockner/; /y/yufang-hou/; /i/iryna-gurevych/", + "bibtex": "@inproceedings{glockner-etal-2022-missing,\n title = \"Missing Counter-Evidence Renders {NLP} Fact-Checking Unrealistic for Misinformation\",\n author = \"Glockner, Max and\n Hou, Yufang and\n Gurevych, Iryna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.397/\",\n doi = \"10.18653/v1/2022.emnlp-main.397\",\n pages = \"5916--5936\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.397.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.397/", + "pdf_size": 356458, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7629545605544233945&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Ubiquitous Knowledge Processing Lab (UKP Lab), Department of Computer Science and Hessian Center for AI (hessian.AI), Technical University of Darmstadt; IBM Research Europe, Ireland; Ubiquitous Knowledge Processing Lab (UKP Lab), Department of Computer Science and Hessian Center for AI (hessian.AI), Technical University of Darmstadt", + "aff_domain": "ukp.tu-darmstadt.de;ie.ibm.com;ukp.tu-darmstadt.de", + "email": "ukp.tu-darmstadt.de;ie.ibm.com;ukp.tu-darmstadt.de", + "github": "https://github.com/UKPLab/emnlp2022-missing-counter-evidence", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Technical University of Darmstadt;IBM Research 
Europe", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.tu-darmstadt.de;https://www.ibm.com/research/europe", + "aff_unique_abbr": "TU Darmstadt;IBM RE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Germany;Ireland" + }, + { + "id": "2022.findings-emnlp.211", + "title": "Mitigating Covertly Unsafe Text within Natural Language Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "An increasingly prevalent problem for intelligent technologies is text safety, as uncontrolled systems may generate recommendations to their users that lead to injury or life-threatening consequences. However, the degree of explicitness of a generated statement that can cause physical harm varies. In this paper, we distinguish types of text that can lead to physical harm and establish one particularly underexplored category: covertly unsafe text. Then, we further break down this category with respect to the system\u2019s information and discuss solutions to mitigate the generation of text in each of these subcategories. Ultimately, our work defines the problem of covertly unsafe language that causes physical harm and argues that this subtle yet dangerous issue needs to be prioritized by stakeholders and regulators. 
We highlight mitigation strategies to inspire future researchers to tackle this challenging problem and help improve safety within smart systems.", + "author": "Alex Mei; Anisha Kabir; Sharon Levy; Melanie Subbiah; Emily Allaway; John Judge; Desmond Patton; Bruce Bimber; Kathleen McKeown; William Yang Wang", + "authorids": "/a/alex-mei/; /a/anisha-kabir/; /s/sharon-levy/; /m/melanie-subbiah/; /e/emily-allaway/; /j/john-judge/; /d/desmond-patton/; /b/bruce-bimber/; /k/kathleen-mckeown/; /w/william-yang-wang/", + "bibtex": "@inproceedings{mei-etal-2022-mitigating,\n title = \"Mitigating Covertly Unsafe Text within Natural Language Systems\",\n author = \"Mei, Alex and\n Kabir, Anisha and\n Levy, Sharon and\n Subbiah, Melanie and\n Allaway, Emily and\n Judge, John and\n Patton, Desmond and\n Bimber, Bruce and\n McKeown, Kathleen and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.211/\",\n doi = \"10.18653/v1/2022.findings-emnlp.211\",\n pages = \"2914--2926\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.211.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.211/", + "pdf_size": 289331, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8684883118764312787&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": "University of California, Santa Barbara; University of California, Santa Barbara; University of California, Santa Barbara; Columbia University; Columbia University; University of California, Santa Barbara; University of Pennsylvania; University of California, Santa Barbara; Columbia University; University of California, Santa Barbara", + "aff_domain": 
"cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.columbia.edu;cs.columbia.edu;cs.ucsb.edu;upenn.edu;polisci.ucsb.edu;cs.columbia.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.columbia.edu;cs.columbia.edu;cs.ucsb.edu;upenn.edu;polisci.ucsb.edu;cs.columbia.edu;cs.ucsb.edu", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;1;1;0;2;0;1;0", + "aff_unique_norm": "University of California, Santa Barbara;Columbia University;University of Pennsylvania", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsb.edu;https://www.columbia.edu;https://www.upenn.edu", + "aff_unique_abbr": "UCSB;Columbia;UPenn", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.176", + "title": "Mitigating Data Sparsity for Short Text Topic Modeling by Topic-Semantic Contrastive Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "To overcome the data sparsity issue in short text topic modeling, existing methods commonly rely on data augmentation or the data characteristic of short texts to introduce more word co-occurrence information. However, most of them do not make full use of the augmented data or the data characteristic: they insufficiently learn the relations among samples in data, leading to dissimilar topic distributions of semantically similar text pairs. To better address data sparsity, in this paper we propose a novel short text topic modeling framework, Topic-Semantic Contrastive Topic Model (TSCTM). To sufficiently model the relations among samples, we employ a new contrastive learning method with efficient positive and negative sampling strategies based on topic semantics. This contrastive learning method refines the representations, enriches the learning signals, and thus mitigates the sparsity issue. 
Extensive experimental results show that our TSCTM outperforms state-of-the-art baselines regardless of the data augmentation availability, producing high-quality topics and topic distributions.", + "author": "Xiaobao Wu; Anh Tuan Luu; Xinshuai Dong", + "authorids": "/x/xiaobao-wu/; /l/luu-anh-tuan/; /x/xinshuai-dong/", + "bibtex": "@inproceedings{wu-etal-2022-mitigating,\n title = \"Mitigating Data Sparsity for Short Text Topic Modeling by Topic-Semantic Contrastive Learning\",\n author = \"Wu, Xiaobao and\n Luu, Anh Tuan and\n Dong, Xinshuai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.176/\",\n doi = \"10.18653/v1/2022.emnlp-main.176\",\n pages = \"2748--2760\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.176.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.176/", + "pdf_size": 2475738, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=654877714904585800&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Nanyang Technological University; Nanyang Technological University; Carnegie Mellon University", + "aff_domain": "e.ntu.edu.sg;ntu.edu.sg;andrew.cmu.edu", + "email": "e.ntu.edu.sg;ntu.edu.sg;andrew.cmu.edu", + "github": "https://github.com/bobxwu/TSCTM", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Nanyang Technological University;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.cmu.edu", + "aff_unique_abbr": "NTU;CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Singapore;United 
States" + }, + { + "id": "2022.emnlp-main.189", + "title": "Mitigating Inconsistencies in Multimodal Sentiment Analysis under Uncertain Missing Modalities", + "track": "main", + "status": "Main", + "award": false, + "abstract": "For the missing modality problem in Multimodal Sentiment Analysis (MSA), the inconsistency phenomenon occurs when the sentiment changes due to the absence of a modality. The absent modality that determines the overall semantic can be considered as a key missing modality. However, previous works all ignored the inconsistency phenomenon, simply discarding missing modalities or solely generating associated features from available modalities. The neglect of the key missing modality case may lead to incorrect semantic results. To tackle the issue, we propose an Ensemble-based Missing Modality Reconstruction (EMMR) network to detect and recover semantic features of the key missing modality. Specifically, we first learn joint representations with remaining modalities via a backbone encoder-decoder network. Then, based on the recovered features, we check the semantic consistency to determine whether the absent modality is crucial to the overall sentiment polarity. Once the inconsistency problem due to the key missing modality exists, we integrate several encoder-decoder approaches for better decision making. 
Extensive experiments and analyses are conducted on CMU-MOSI and IEMOCAP datasets, validating the superiority of the proposed method.", + "author": "Jiandian Zeng; Jiantao Zhou; Tianyi Liu", + "authorids": "/j/jiandian-zeng/; /j/jiantao-zhou/; /t/tianyi-liu/", + "bibtex": "@inproceedings{zeng-etal-2022-mitigating,\n title = \"Mitigating Inconsistencies in Multimodal Sentiment Analysis under Uncertain Missing Modalities\",\n author = \"Zeng, Jiandian and\n Zhou, Jiantao and\n Liu, Tianyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.189/\",\n doi = \"10.18653/v1/2022.emnlp-main.189\",\n pages = \"2924--2934\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.189.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.189/", + "pdf_size": 4392935, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13628677803865637899&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "State Key Laboratory of Internet of Things for Smart City + Department of Computer and Information Science, University of Macau; State Key Laboratory of Internet of Things for Smart City + Department of Computer and Information Science, University of Macau; Department of Computer Science and Engineering, Shanghai Jiao Tong University", + "aff_domain": "um.edu.mo;um.edu.mo;sjtu.edu.cn", + "email": "um.edu.mo;um.edu.mo;sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;2", + "aff_unique_norm": "State Key Laboratory of Internet of Things for Smart City;University of Macau;Shanghai Jiao Tong University", + "aff_unique_dep": ";Department of Computer and Information 
Science;Department of Computer Science and Engineering", + "aff_unique_url": ";https://www.um.edu.mo;https://www.sjtu.edu.cn", + "aff_unique_abbr": ";UM;SJTU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0", + "aff_country_unique": "China;Macau" + }, + { + "id": "2022.emnlp-main.777", + "title": "Mitigating Spurious Correlation in Natural Language Understanding with Counterfactual Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite their promising results on standard benchmarks, NLU models are still prone to make predictions based on shortcuts caused by unintended bias in the dataset. For example, an NLI model may use lexical overlap as a shortcut to make entailment predictions due to repetitive data generation patterns from annotators, also called annotation artifacts. In this paper, we propose a causal analysis framework to help debias NLU models. We show that (1) by defining causal relationships, we can introspect how much annotation artifacts affect the outcomes. (2) We can utilize counterfactual inference to mitigate bias with this knowledge. We found that viewing a model as a treatment can mitigate bias more effectively than viewing annotation artifacts as treatment. (3) In addition to bias mitigation, we can interpret how much each debiasing strategy is affected by annotation artifacts. 
Our experimental results show that using counterfactual inference can improve out-of-distribution performance in all settings while maintaining high in-distribution performance.", + "author": "Can Udomcharoenchaikit; Wuttikorn Ponwitayarat; Patomporn Payoungkhamdee; Kanruethai Masuk; Weerayut Buaphet; Ekapol Chuangsuwanich; Sarana Nutanong", + "authorids": "/c/can-udomcharoenchaikit/; /w/wuttikorn-ponwitayarat/; /p/patomporn-payoungkhamdee/; /k/kanruethai-masuk/; /w/weerayut-buaphet/; /e/ekapol-chuangsuwanich/; /s/sarana-nutanong/", + "bibtex": "@inproceedings{udomcharoenchaikit-etal-2022-mitigating,\n title = \"Mitigating Spurious Correlation in Natural Language Understanding with Counterfactual Inference\",\n author = \"Udomcharoenchaikit, Can and\n Ponwitayarat, Wuttikorn and\n Payoungkhamdee, Patomporn and\n Masuk, Kanruethai and\n Buaphet, Weerayut and\n Chuangsuwanich, Ekapol and\n Nutanong, Sarana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.777/\",\n doi = \"10.18653/v1/2022.emnlp-main.777\",\n pages = \"11308--11321\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.777.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.777/", + "pdf_size": 1194088, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13589278174975258063&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "School of Information Science and Technology, VISTEC, Thailand; School of Information Science and Technology, VISTEC, Thailand; School of Information Science and Technology, VISTEC, Thailand; VISAI AI, Thailand + School of Information Science and Technology, VISTEC, Thailand; School of Information 
Science and Technology, VISTEC, Thailand; Department of Computer Engineering, Chulalongkorn University, Thailand; School of Information Science and Technology, VISTEC, Thailand", + "aff_domain": "vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;cp.eng.chula.ac.th;vistec.ac.th", + "email": "vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;vistec.ac.th;cp.eng.chula.ac.th;vistec.ac.th", + "github": "https://github.com/c4n/debias_nlu", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1+0;0;2;0", + "aff_unique_norm": "VISTEC;VISAI AI;Chulalongkorn University", + "aff_unique_dep": "School of Information Science and Technology;;Department of Computer Engineering", + "aff_unique_url": "https://www.vistec.ac.th;;http://www.chula.ac.th", + "aff_unique_abbr": "VISTEC;;Chula", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Bangkok", + "aff_country_unique_index": "0;0;0;0+0;0;0;0", + "aff_country_unique": "Thailand" + }, + { + "id": "2022.findings-emnlp.239", + "title": "Mix-and-Match: Scalable Dialog Response Retrieval using Gaussian Mixture Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Embedding-based approaches for dialog response retrieval embed the context-response pairs as points in the embedding space. These approaches are scalable, but fail to account for the complex, many-to-many relationships that exist between context-response pairs. On the other end of the spectrum, there are approaches that feed the context-response pairs jointly through multiple layers of neural networks. These approaches can model the complex relationships between context-response pairs, but fail to scale when the set of responses is moderately large (>1000). In this paper, we propose a scalable model that can learn complex relationships between context-response pairs. Specifically, the model maps the contexts as well as responses to probability distributions over the embedding space. 
We train the models by optimizing the Kullback-Leibler divergence between the distributions induced by context-response pairs in the training data. We show that the resultant model achieves better performance as compared to other embedding-based approaches on publicly available conversation data.", + "author": "Gaurav Pandey; Danish Contractor; Sachindra Joshi", + "authorids": "/g/gaurav-pandey/; /d/danish-contractor/; /s/sachindra-joshi/", + "bibtex": "@inproceedings{pandey-etal-2022-mix,\n title = \"Mix-and-Match: Scalable Dialog Response Retrieval using {G}aussian Mixture Embeddings\",\n author = \"Pandey, Gaurav and\n Contractor, Danish and\n Joshi, Sachindra\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.239/\",\n doi = \"10.18653/v1/2022.findings-emnlp.239\",\n pages = \"3273--3287\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.239.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.239/", + "pdf_size": 579354, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:YYKU9rLVSKkJ:scholar.google.com/&scioq=Mix-and-Match:+Scalable+Dialog+Response+Retrieval+using+Gaussian+Mixture+Embeddings&hl=en&as_sdt=0,5", + "gs_version_total": 4, + "aff": "IBM Research AI, New Delhi; IBM Research AI, New York; IBM Research AI, New Delhi", + "aff_domain": "in.ibm.com;ibm.com;in.ibm.com", + "email": "in.ibm.com;ibm.com;in.ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "IBM Research AI;IBM Research", + "aff_unique_dep": "AI;AI", + "aff_unique_url": "https://www.ibm.com/research;https://www.ibm.com/research", + "aff_unique_abbr": "IBM;IBM", + 
"aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "New Delhi;New York", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.261", + "title": "Mixed-effects transformers for hierarchical adaptation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language differs dramatically from context to context. To some degree, large language models like GPT-3 account for such variation by conditioning on strings of initial input text, or prompts. However, prompting can be ineffective when contexts are sparse, out-of-sample, or extra-textual. In this paper, we introduce the mixed-effects transformer (MET), a novel approach for learning hierarchically-structured prefixes\u2014 lightweight modules prepended to an input sequence\u2014 to account for structured variation in language use. Specifically, we show how the popular class of mixed-effects regression models may be extended to transformer-based architectures using a regularized prefix-tuning procedure with dropout. 
We evaluate this approach on several domain-adaptation benchmarks, finding that it learns contextual variation from minimal data while generalizing well to unseen contexts.", + "author": "Julia White; Noah Goodman; Robert Hawkins", + "authorids": "/j/julia-white/; /n/noah-goodman/; /r/robert-hawkins/", + "bibtex": "@inproceedings{white-etal-2022-mixed,\n title = \"Mixed-effects transformers for hierarchical adaptation\",\n author = \"White, Julia and\n Goodman, Noah and\n Hawkins, Robert\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.261/\",\n doi = \"10.18653/v1/2022.emnlp-main.261\",\n pages = \"3944--3954\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.261.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.261/", + "pdf_size": 668098, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15074421013434931638&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 3, + "aff": "Electrical Engineering, Stanford University; Computer Science, Psychology, Stanford University; Psychology, Princeton University", + "aff_domain": "stanford.edu;stanford.edu;princeton.edu", + "email": "stanford.edu;stanford.edu;princeton.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Stanford University;Princeton University", + "aff_unique_dep": "Electrical Engineering;Department of Psychology", + "aff_unique_url": "https://www.stanford.edu;https://www.princeton.edu", + "aff_unique_abbr": "Stanford;Princeton", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + 
}, + { + "id": "2022.findings-emnlp.303", + "title": "Mixed-modality Representation Learning and Pre-training for Joint Table-and-Text Retrieval in OpenQA", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Retrieving evidences from tabular and textual resources is essential for open-domain question answering (OpenQA), which provides more comprehensive information. However, training an effective dense table-text retriever is difficult due to the challenges of table-text discrepancy and data sparsity problem. To address the above challenges, we introduce an optimized OpenQA Table-Text Retriever (OTTeR) to jointly retrieve tabular and textual evidences. Firstly, we propose to enhance mixed-modality representation learning via two mechanisms: modality-enhanced representation and mixed-modality negative sampling strategy. Secondly, to alleviate data sparsity problem and enhance the general retrieval ability, we conduct retrieval-centric mixed-modality synthetic pre-training. Experimental results demonstrate that OTTeR substantially improves the performance of table-and-text retrieval on the OTT-QA dataset. Comprehensive analyses examine the effectiveness of all the proposed mechanisms. 
Besides, equipped with OTTeR, our OpenQA system achieves the state-of-the-art result on the downstream QA task, with 10.1% absolute improvement in terms of the exact match over the previous best system.", + "author": "Junjie Huang; Wanjun Zhong; Qian Liu; Ming Gong; Daxin Jiang; Nan Duan", + "authorids": "/j/junjie-huang/; /w/wanjun-zhong/; /q/qian-liu/; /m/ming-gong/; /d/daxin-jiang/; /n/nan-duan/", + "bibtex": "@inproceedings{huang-etal-2022-mixed,\n title = \"Mixed-modality Representation Learning and Pre-training for Joint Table-and-Text Retrieval in {O}pen{QA}\",\n author = \"Huang, Junjie and\n Zhong, Wanjun and\n Liu, Qian and\n Gong, Ming and\n Jiang, Daxin and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.303/\",\n doi = \"10.18653/v1/2022.findings-emnlp.303\",\n pages = \"4117--4129\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.303.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.303/", + "pdf_size": 859224, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2868121089706207523&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/Jun-jie-Huang/OTTeR", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.278", + "title": "Mixture of Attention Heads: Selecting Attention Heads Per Token", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Mixture-of-Experts (MoE) networks have been proposed as an efficient way to scale up model capacity and implement conditional computing. 
However, the study of MoE components mostly focused on the feedforward layer in Transformer architecture. This paper proposes the Mixture of Attention Heads (MoA), a new architecture that combines multi-head attention with the MoE mechanism. MoA includes a set of attention heads that each has its own set of parameters. Given an input, a router dynamically selects a subset of k attention heads per token. This conditional computation schema allows MoA to achieve stronger performance than the standard multi-head attention layer. Furthermore, the sparsely gated MoA can easily scale up the number of attention heads and the number of parameters while preserving computational efficiency. Despite performance improvements, MoA also automatically differentiates heads\u2019 utilities, providing a new perspective to discuss the model\u2019s interpretability. We conducted experiments on several important tasks, including Machine Translation and Masked Language Modeling. Experiments have shown promising results on several tasks against strong baselines that involve large and very deep models.", + "author": "Xiaofeng Zhang; Yikang Shen; Zeyu Huang; Jie Zhou; Wenge Rong; Zhang Xiong", + "authorids": "/x/xiaofeng-zhang/; /y/yikang-shen/; /z/zeyu-huang/; /j/jie-zhou/; /w/wenge-rong/; /z/zhang-xiong/", + "bibtex": "@inproceedings{zhang-etal-2022-mixture,\n title = \"Mixture of Attention Heads: Selecting Attention Heads Per Token\",\n author = \"Zhang, Xiaofeng and\n Shen, Yikang and\n Huang, Zeyu and\n Zhou, Jie and\n Rong, Wenge and\n Xiong, Zhang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.278/\",\n doi = \"10.18653/v1/2022.emnlp-main.278\",\n pages = 
\"4150--4162\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.278.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.278/", + "pdf_size": 568385, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10712282716225096166&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "State Key Laboratory of Software Development Environment, School of Computer Science and Engineering, Beihang University, China+ Sino-French Engineer School, Beihang University, China; Mila, University of Montreal, Canada+ Wechat AI, Tencent, China; State Key Laboratory of Software Development Environment, School of Computer Science and Engineering, Beihang University, China+ Sino-French Engineer School, Beihang University, China; Wechat AI, Tencent, China; State Key Laboratory of Software Development Environment, School of Computer Science and Engineering, Beihang University, China; State Key Laboratory of Software Development Environment, School of Computer Science and Engineering, Beihang University, China", + "aff_domain": "buaa.edu.cn;gmail.com; ; ; ; ", + "email": "buaa.edu.cn;gmail.com; ; ; ; ", + "github": "https://github.com/yikangshen/MoA", + "project": "", + "author_num": 6, + "aff_unique_index": "0+0;1+2;0+0;2;0;0", + "aff_unique_norm": "Beihang University;University of Montreal;Tencent", + "aff_unique_dep": "School of Computer Science and Engineering;Mila;Wechat AI", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.mila.quebec;https://www.tencent.com", + "aff_unique_abbr": "Beihang;Mila;Tencent", + "aff_campus_unique_index": ";1;", + "aff_campus_unique": ";Montreal", + "aff_country_unique_index": "0+0;1+0;0+0;0;0;0", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.emnlp-main.719", + "title": "MoSE: Modality Split and Ensemble for Multimodal Knowledge Graph Completion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal knowledge graph completion (MKGC) aims to predict 
missing entities in MKGs. Previous works usually share relation representation across modalities. This results in mutual interference between modalities during training, since for a pair of entities, the relation from one modality probably contradicts that from another modality. Furthermore, making a unified prediction based on the shared relation representation treats the input in different modalities equally, while their importance to the MKGC task should be different. In this paper, we propose MoSE, a Modality Split representation learning and Ensemble inference framework for MKGC. Specifically, in the training phase, we learn modality-split relation embeddings for each modality instead of a single modality-shared one, which alleviates the modality interference. Based on these embeddings, in the inference phase, we first make modality-split predictions and then exploit various ensemble methods to combine the predictions with different weights, which models the modality importance dynamically. Experimental results on three KG datasets show that MoSE outperforms state-of-the-art MKGC methods. 
Codes are available at https://github.com/OreOZhao/MoSE4MKGC.", + "author": "Yu Zhao; Xiangrui Cai; Yike Wu; Haiwei Zhang; Ying Zhang; Guoqing Zhao; Ning Jiang", + "authorids": "/y/yu-zhao/; /x/xiangrui-cai/; /y/yike-wu/; /h/haiwei-zhang/; /y/ying-zhang/; /g/guoqing-zhao/; /n/ning-jiang/", + "bibtex": "@inproceedings{zhao-etal-2022-mose,\n title = \"{M}o{SE}: Modality Split and Ensemble for Multimodal Knowledge Graph Completion\",\n author = \"Zhao, Yu and\n Cai, Xiangrui and\n Wu, Yike and\n Zhang, Haiwei and\n Zhang, Ying and\n Zhao, Guoqing and\n Jiang, Ning\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.719/\",\n doi = \"10.18653/v1/2022.emnlp-main.719\",\n pages = \"10527--10536\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.719.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.719/", + "pdf_size": 867887, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7265857542641884536&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "College of Cyber Science, TKLNDST, Nankai University, Tianjin, China; College of Cyber Science, TKLNDST, Nankai University, Tianjin, China; School of Journalism and Communication, Nankai University, Tianjin, China; College of Cyber Science, TKLNDST, Nankai University, Tianjin, China; College of Computer Science, Nankai University, Tianjin, China; Mashang Consumer Finance Co, Ltd; Mashang Consumer Finance Co, Ltd", + "aff_domain": "dbis.nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;nankai.edu.cn; ; ", + "email": "dbis.nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;nankai.edu.cn;nankai.edu.cn; ; ", + "github": 
"https://github.com/OreOZhao/MoSE4MKGC", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;1;1", + "aff_unique_norm": "Nankai University;Mashang Consumer Finance", + "aff_unique_dep": "College of Cyber Science;Consumer Finance", + "aff_unique_url": "http://www.nankai.edu.cn;", + "aff_unique_abbr": "Nankai;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.530", + "title": "Modal-specific Pseudo Query Generation for Video Corpus Moment Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Video corpus moment retrieval (VCMR) is the task to retrieve the most relevant video moment from a large video corpus using a natural language query. For narrative videos, e.g., drama or movies, the holistic understanding of temporal dynamics and multimodal reasoning are crucial. Previous works have shown promising results; however, they relied on the expensive query annotations for the VCMR, i.e., the corresponding moment intervals. To overcome this problem, we propose a self-supervised learning framework: Modal-specific Pseudo Query Generation Network (MPGN). First, MPGN selects candidate temporal moments via subtitle-based moment sampling. Then, it generates pseudo queries exploiting both visual and textual information from the selected temporal moments. Through the multimodal information in the pseudo queries, we show that MPGN successfully learns to localize the video corpus moment without any explicit annotation. We validate the effectiveness of MPGN on TVR dataset, showing the competitive results compared with both supervised models and unsupervised setting models.", + "author": "Minjoon Jung; SeongHo Choi; JooChan Kim; Jin-Hwa Kim; Byoung-Tak Zhang", + "authorids": "/m/minjoon-jung/; /s/seongho-choi/; /j/joochan-kim/; /j/jin-hwa-kim/; /b/byoung-tak-zhang/", + "bibtex": 
"@inproceedings{jung-etal-2022-modal,\n title = \"Modal-specific Pseudo Query Generation for Video Corpus Moment Retrieval\",\n author = \"Jung, Minjoon and\n Choi, SeongHo and\n Kim, JooChan and\n Kim, Jin-Hwa and\n Zhang, Byoung-Tak\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.530/\",\n doi = \"10.18653/v1/2022.emnlp-main.530\",\n pages = \"7769--7781\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.530.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.530/", + "pdf_size": 11528076, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3081486210354949999&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Seoul National University; Seoul National University; Seoul National University; NAVER AI Lab + AI Institute of Seoul National University; Seoul National University + AI Institute of Seoul National University", + "aff_domain": "bi.snu.ac.kr;bi.snu.ac.kr;bi.snu.ac.kr;navercorp.com;bi.snu.ac.kr", + "email": "bi.snu.ac.kr;bi.snu.ac.kr;bi.snu.ac.kr;navercorp.com;bi.snu.ac.kr", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+0;0+0", + "aff_unique_norm": "Seoul National University;NAVER Corporation", + "aff_unique_dep": ";AI Lab", + "aff_unique_url": "https://www.snu.ac.kr;https://www.naver.com", + "aff_unique_abbr": "SNU;NAVER", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0+0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.756", + "title": "Model Cascading: Towards Jointly Improving Efficiency and Accuracy of NLP Systems", + "track": "main", + "status": "Main", 
+ "award": false, + "abstract": "Do all instances need inference through the big models for a correct prediction? Perhaps not; some instances are easy and can be answered correctly by even small capacity models. This provides opportunities for improving the computational efficiency of systems. In this work, we present an explorative study on \u2018model cascading\u2019, a simple technique that utilizes a collection of models of varying capacities to accurately yet efficiently output predictions. Through comprehensive experiments in multiple task settings that differ in the number of models available for cascading (K value), we show that cascading improves both the computational efficiency and the prediction accuracy. For instance, in K=3 setting, cascading saves up to 88.93% computation cost and consistently achieves superior prediction accuracy with an improvement of up to 2.18%. We also study the impact of introducing additional models in the cascade and show that it further increases the efficiency improvements. 
Finally, we hope that our work will facilitate development of efficient NLP systems making their widespread adoption in real-world applications possible.", + "author": "Neeraj Varshney; Chitta Baral", + "authorids": "/n/neeraj-varshney/; /c/chitta-baral/", + "bibtex": "@inproceedings{varshney-baral-2022-model,\n title = \"Model Cascading: Towards Jointly Improving Efficiency and Accuracy of {NLP} Systems\",\n author = \"Varshney, Neeraj and\n Baral, Chitta\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.756/\",\n doi = \"10.18653/v1/2022.emnlp-main.756\",\n pages = \"11007--11021\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.756.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.756/", + "pdf_size": 1426323, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14877012563956835995&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Arizona State University; Arizona State University", + "aff_domain": "asu.edu;asu.edu", + "email": "asu.edu;asu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Arizona State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.asu.edu", + "aff_unique_abbr": "ASU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.815", + "title": "Model Criticism for Long-Form Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language models have demonstrated the ability to generate highly fluent text; however, it remains unclear 
whether their output retains coherent high-level structure (e.g., story progression). Here, we propose to apply a statistical tool, model criticism in latent space, to evaluate the high-level structure of the generated text. Model criticism compares the distributions between real and generated data in a latent space obtained according to an assumptive generative process. Different generative processes identify specific failure modes of the underlying model. We perform experiments on three representative aspects of high-level discourse\u2014coherence, coreference, and topicality\u2014and find that transformer-based language models are able to capture topical structures but have a harder time maintaining structural coherence or modeling coreference.", + "author": "Yuntian Deng; Volodymyr Kuleshov; Alexander Rush", + "authorids": "/y/yuntian-deng/; /v/volodymyr-kuleshov/; /a/alexander-m-rush/", + "bibtex": "@inproceedings{deng-etal-2022-model,\n title = \"Model Criticism for Long-Form Text Generation\",\n author = \"Deng, Yuntian and\n Kuleshov, Volodymyr and\n Rush, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.815/\",\n doi = \"10.18653/v1/2022.emnlp-main.815\",\n pages = \"11887--11912\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.815.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.815/", + "pdf_size": 1468437, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12194647264159842626&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Harvard University; Cornell University; Cornell University", + "aff_domain": "seas.harvard.edu;cornell.edu;cornell.edu", + "email": 
"seas.harvard.edu;cornell.edu;cornell.edu", + "github": "https://github.com/da03/criticize_text_generation", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Harvard University;Cornell University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.harvard.edu;https://www.cornell.edu", + "aff_unique_abbr": "Harvard;Cornell", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.478", + "title": "Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Zero-resource cross-lingual transfer approaches aim to apply supervised modelsfrom a source language to unlabelled target languages. In this paper we performan in-depth study of the two main techniques employed so far for cross-lingualzero-resource sequence labelling, based either on data or model transfer.Although previous research has proposed translation and annotation projection(data-based cross-lingual transfer) as an effective technique for cross-lingualsequence labelling, in this paper we experimentally demonstrate that highcapacity multilingual language models applied in a zero-shot (model-basedcross-lingual transfer) setting consistently outperform data-basedcross-lingual transfer approaches. A detailed analysis of our results suggeststhat this might be due to important differences in language use. Morespecifically, machine translation often generates a textual signal which isdifferent to what the models are exposed to when using gold standard data,which affects both the fine-tuning and evaluation processes. 
Our results alsoindicate that data-based cross-lingual transfer approaches remain a competitiveoption when high-capacity multilingual language models are not available.", + "author": "Iker Garc\u00eda-Ferrero; Rodrigo Agerri; German Rigau", + "authorids": "/i/iker-garcia-ferrero/; /r/rodrigo-agerri/; /g/german-rigau/", + "bibtex": "@inproceedings{garcia-ferrero-etal-2022-model,\n title = \"Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings\",\n author = \"Garc{\\'i}a-Ferrero, Iker and\n Agerri, Rodrigo and\n Rigau, German\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.478/\",\n doi = \"10.18653/v1/2022.findings-emnlp.478\",\n pages = \"6403--6416\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.478.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.478/", + "pdf_size": 1798475, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=259662158228709599&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "HiTZ Basque Center for Language Technologies - Ixa NLP Group; University of the Basque Country UPV/EHU; University of the Basque Country UPV/EHU", + "aff_domain": "ehu.eus;ehu.eus;ehu.eus", + "email": "ehu.eus;ehu.eus;ehu.eus", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "HiTZ Basque Center for Language Technologies;University of the Basque Country", + "aff_unique_dep": "Ixa NLP Group;", + "aff_unique_url": "https://www.hitz.center/;https://www.ehu.eus/en", + "aff_unique_abbr": "HiTZ;UPV/EHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "Spain" + }, + { + "id": "2022.findings-emnlp.305", + "title": "Modeling Complex Dialogue Mappings via Sentence Semantic Segmentation Guided Conditional Variational Auto-Encoder", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Complex dialogue mappings (CDM), including one-to-many and many-to-one mappings, tend to make dialogue models generate incoherent or dull responses, and modeling these mappings remains a huge challenge for neural dialogue systems. To alleviate these problems, methods like introducing external information, reconstructing the optimization function, and manipulating data samples are proposed, while they primarily focus on avoiding training with CDM, inevitably weakening the model\u2019s ability of understanding CDM in human conversations and limiting further improvements in model performance. This paper proposes a Sentence Semantic Segmentation guided Conditional Variational Auto-Encoder (SegCVAE) method which can model and take advantages of the CDM data. Specifically, to tackle the incoherent problem caused by one-to-many, SegCVAE uses response-related prominent semantics to constrained the latent variable. To mitigate the non-diverse problem brought by many-to-one, SegCVAE segments multiple prominent semantics to enrich the latent variables. Three novel components, Internal Separation, External Guidance, and Semantic Norms, are proposed to achieve SegCVAE. 
On dialogue generation tasks, both the automatic and human evaluation results show that SegCVAE achieves new state-of-the-art performance.", + "author": "Bin Sun; Shaoxiong Feng; Yiwei Li; Weichao Wang; Fei Mi; Yitong Li; Kan Li", + "authorids": "/b/bin-sun/; /s/shaoxiong-feng/; /y/yiwei-li/; /w/weichao-wang/; /f/fei-mi/; /y/yitong-li/; /k/kan-li/", + "bibtex": "@inproceedings{sun-etal-2022-modeling,\n title = \"Modeling Complex Dialogue Mappings via Sentence Semantic Segmentation Guided Conditional Variational Auto-Encoder\",\n author = \"Sun, Bin and\n Feng, Shaoxiong and\n Li, Yiwei and\n Wang, Weichao and\n Mi, Fei and\n Li, Yitong and\n Li, Kan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.305/\",\n doi = \"10.18653/v1/2022.findings-emnlp.305\",\n pages = \"4140--4153\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.305.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.305/", + "pdf_size": 524318, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2246700948705470438&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science & Technology, Beijing Institute of Technology; School of Computer Science & Technology, Beijing Institute of Technology; School of Computer Science & Technology, Beijing Institute of Technology; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Huawei Technologies Ltd. 
+ Huawei Noah\u2019s Ark Lab; School of Computer Science & Technology, Beijing Institute of Technology", + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn;huawei.com;huawei.com;huawei.com;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn;huawei.com;huawei.com;huawei.com;bit.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;1;2+1;0", + "aff_unique_norm": "Beijing Institute of Technology;Huawei;Huawei Technologies", + "aff_unique_dep": "School of Computer Science & Technology;Noah\u2019s Ark Lab;", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.huawei.com;https://www.huawei.com", + "aff_unique_abbr": "BIT;Huawei;Huawei", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.424", + "title": "Modeling Consistency Preference via Lexical Chains for Document-level Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper we aim to relieve the issue of lexical translation inconsistency for document-level neural machine translation (NMT) by modeling consistency preference for lexical chains, which consist of repeated words in a source-side document and provide a representation of the lexical consistency structure of the document. Specifically, we first propose lexical-consistency attention to capture consistency context among words in the same lexical chains. Then for each lexical chain we define and learn a consistency-tailored latent variable, which will guide the translation of corresponding sentences to enhance lexical translation consistency. 
Experimental results on Chinese\u2192English and French\u2192English document-level translation tasks show that our approach not only significantly improves translation performance in BLEU, but also substantially alleviates the problem of the lexical translation inconsistency.", + "author": "Xinglin Lyu; Junhui Li; Shimin Tao; Hao Yang; Ying Qin; Min Zhang", + "authorids": "/x/xinglin-lyu/; /j/junhui-li/; /s/shimin-tao/; /h/hao-yang/; /y/ying-qin/; /m/min-zhang/", + "bibtex": "@inproceedings{lyu-etal-2022-modeling,\n title = \"Modeling Consistency Preference via Lexical Chains for Document-level Neural Machine Translation\",\n author = \"Lyu, Xinglin and\n Li, Junhui and\n Tao, Shimin and\n Yang, Hao and\n Qin, Ying and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.424/\",\n doi = \"10.18653/v1/2022.emnlp-main.424\",\n pages = \"6312--6326\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.424.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.424/", + "pdf_size": 1728048, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9139275079339231559&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science and Technology, Soochow University, Suzhou, China; School of Computer Science and Technology, Soochow University, Suzhou, China; Huawei Translation Services Center, Beijing, China; Huawei Translation Services Center, Beijing, China; Huawei Translation Services Center, Beijing, China; School of Computer Science and Technology, Soochow University, Suzhou, China", + "aff_domain": "stu.suda.edu.cn;suda.edu.cn;suda.edu.cn;huawei.com;huawei.com;huawei.com", 
+ "email": "stu.suda.edu.cn;suda.edu.cn;suda.edu.cn;huawei.com;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;1;1;0", + "aff_unique_norm": "Soochow University;Huawei", + "aff_unique_dep": "School of Computer Science and Technology;Translation Services Center", + "aff_unique_url": "http://www.soochow.edu.cn;https://www.huawei.com", + "aff_unique_abbr": ";Huawei", + "aff_campus_unique_index": "0;0;1;1;1;0", + "aff_campus_unique": "Suzhou;Beijing", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.515", + "title": "Modeling Context With Linear Attention for Scalable Document-Level Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Document-level machine translation leverages inter-sentence dependencies to produce more coherent and consistent translations. However, these models, predominantly based on transformers, are difficult to scale to long documents as their attention layers have quadratic complexity in the sequence length. Recent efforts on efficient attention improve scalability, but their effect on document translation remains unexplored. In this work, we investigate the efficacy of a recent linear attention model by Peng et al. (2021) on document translation and augment it with a sentential gate to promote a recency inductive bias. We evaluate the model on IWSLT 2015 and OpenSubtitles 2018 against the transformer, demonstrating substantially increased decoding speed on long sequences with similar or better BLEU scores. We show that sentential gating further improves translation quality on IWSLT.", + "author": "Zhaofeng Wu; Hao Peng; Nikolaos Pappas; Noah A. 
Smith", + "authorids": "/z/zhaofeng-wu/; /h/hao-peng/; /n/nikolaos-pappas/; /n/noah-a-smith/", + "bibtex": "@inproceedings{wu-etal-2022-modeling,\n title = \"Modeling Context With Linear Attention for Scalable Document-Level Translation\",\n author = \"Wu, Zhaofeng and\n Peng, Hao and\n Pappas, Nikolaos and\n Smith, Noah A.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.515/\",\n doi = \"10.18653/v1/2022.findings-emnlp.515\",\n pages = \"6931--6939\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.515.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.515/", + "pdf_size": 441239, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2095660658591216248&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "MIT; Allen Institute for Artificial Intelligence; AWS AI; Paul G. Allen School of Computer Science & Engineering, University of Washington", + "aff_domain": "csail.mit.edu;allenai.org;amazon.com;cs.washington.edu", + "email": "csail.mit.edu;allenai.org;amazon.com;cs.washington.edu", + "github": "https://github.com/ZhaofengWu/rfa-doc-mt6931", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Massachusetts Institute of Technology;Allen Institute for Artificial Intelligence;Amazon Web Services;University of Washington", + "aff_unique_dep": ";;AWS AI;Paul G. 
Allen School of Computer Science & Engineering", + "aff_unique_url": "https://web.mit.edu;https://allenai.org;https://aws.amazon.com;https://www.washington.edu", + "aff_unique_abbr": "MIT;AI2;AWS;UW", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Seattle", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.117", + "title": "Modeling Information Change in Science Communication with Semantically Matched Paraphrases", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Whether the media faithfully communicate scientific information has long been a core issue to the science community. Automatically identifying paraphrased scientific findings could enable large-scale tracking and analysis of information changes in the science communication process, but this requires systems to understand the similarity between scientific information across multiple domains. To this end, we present the SCIENTIFIC PARAPHRASE AND INFORMATION CHANGE DATASET (SPICED), the first paraphrase dataset of scientific findings annotated for degree of information change. SPICED contains 6,000 scientific finding pairs extracted from news stories, social media discussions, and full texts of original papers. We demonstrate that SPICED poses a challenging task and that models trained on SPICED improve downstream performance on evidence retrieval for fact checking of real-world scientific claims. Finally, we show that models trained on SPICED can reveal large-scale trends in the degrees to which people and organizations faithfully communicate new scientific findings. 
Data, code, and pre-trained models are available at http://www.copenlu.com/publication/2022_emnlp_wright/.", + "author": "Dustin Wright; Jiaxin Pei; David Jurgens; Isabelle Augenstein", + "authorids": "/d/dustin-wright/; /j/jiaxin-pei/; /d/david-jurgens/; /i/isabelle-augenstein/", + "bibtex": "@inproceedings{wright-etal-2022-modeling,\n title = \"Modeling Information Change in Science Communication with Semantically Matched Paraphrases\",\n author = \"Wright, Dustin and\n Pei, Jiaxin and\n Jurgens, David and\n Augenstein, Isabelle\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.117/\",\n doi = \"10.18653/v1/2022.emnlp-main.117\",\n pages = \"1783--1807\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.117.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.117/", + "pdf_size": 667421, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16301192229674221718&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Dept. of Computer Science, University of Copenhagen, Denmark; School of Information, University of Michigan, Ann Arbor, MI, USA; School of Information, University of Michigan, Ann Arbor, MI, USA; Dept. of Computer Science, University of Copenhagen, Denmark", + "aff_domain": "di.ku.dk;di.ku.dk;umich.edu;umich.edu", + "email": "di.ku.dk;di.ku.dk;umich.edu;umich.edu", + "github": "", + "project": "http://www.copenlu.com/publication/2022_emnlp_wright/", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "University of Copenhagen;University of Michigan", + "aff_unique_dep": "Dept. 
of Computer Science;School of Information", + "aff_unique_url": "https://www.ku.dk;https://www.umich.edu", + "aff_unique_abbr": "UCPH;UM", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Ann Arbor", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "Denmark;United States" + }, + { + "id": "2022.emnlp-main.459", + "title": "Modeling Label Correlations for Ultra-Fine Entity Typing with Neural Pairwise Conditional Random Field", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Ultra-fine entity typing (UFET) aims to predict a wide range of type phrases that correctly describe the categories of a given entity mention in a sentence. Most recent works infer each entity type independently, ignoring the correlations between types, e.g., when an entity is inferred as a president, it should also be a politician and a leader. To this end, we use an undirected graphical model called pairwise conditional random field (PCRF) to formulate the UFET problem, in which the type variables are not only unarily influenced by the input but also pairwisely relate to all the other type variables. We use various modern backbones for entity typing to compute unary potentials, and derive pairwise potentials from type phrase representations that both capture prior semantic information and facilitate accelerated inference. We use mean-field variational inference for efficient type inference on very large type sets and unfold it as a neural network module to enable end-to-end training. Experiments on UFET show that the Neural-PCRF consistently outperforms its backbones with little cost and results in a competitive performance against cross-encoder based SOTA while being thousands of times faster. We also find Neural-PCRF effective on a widely used fine-grained entity typing dataset with a smaller type set. 
We pack Neural-PCRF as a network module that can be plugged onto multi-label type classifiers with ease and release it in .", + "author": "Chengyue Jiang; Yong Jiang; Weiqi Wu; Pengjun Xie; Kewei Tu", + "authorids": "/c/chengyue-jiang/; /y/yong-jiang/; /w/weiqi-wu/; /p/pengjun-xie/; /k/kewei-tu/", + "bibtex": "@inproceedings{jiang-etal-2022-modeling,\n title = \"Modeling Label Correlations for Ultra-Fine Entity Typing with Neural Pairwise Conditional Random Field\",\n author = \"Jiang, Chengyue and\n Jiang, Yong and\n Wu, Weiqi and\n Xie, Pengjun and\n Tu, Kewei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.459/\",\n doi = \"10.18653/v1/2022.emnlp-main.459\",\n pages = \"6836--6847\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.459.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.459/", + "pdf_size": 3523149, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11735514768728318805&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "DAMO Academy, Alibaba Group, China+This work was done during Chengyue Jiang\u2019s internship at DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China", + "aff_domain": "gmail.com;alibaba-inc.com;foxmail.com;alibaba-inc.com;gmail.com", + "email": "gmail.com;alibaba-inc.com;foxmail.com;alibaba-inc.com;gmail.com", + "github": "github.com/modelscope/adaseq/examples/NPCRF", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "DAMO Academy", + 
"aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.291", + "title": "Momentum Contrastive Pre-training for Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing pre-training methods for extractive Question Answering (QA) generate cloze-like queries different from natural questions in syntax structure, which could overfit pre-trained models to simple keyword matching. In order to address this problem, we propose a novel Momentum Contrastive pRe-training fOr queStion anSwering (MCROSS) method for extractive QA. Specifically, MCROSS introduces a momentum contrastive learning framework to align the answer probability between cloze-like and natural query-passage sample pairs. Hence, the pre-trained models can better transfer the knowledge learned in cloze-like samples to answering natural questions. 
Experimental results on three benchmarking QA datasets show that our method achieves noticeable improvement compared with all baselines in both supervised and zero-shot scenarios.", + "author": "Minda Hu; Muzhi Li; Yasheng Wang; Irwin King", + "authorids": "/m/minda-hu/; /m/muzhi-li/; /y/yasheng-wang/; /i/irwin-king/", + "bibtex": "@inproceedings{hu-etal-2022-momentum,\n title = \"Momentum Contrastive Pre-training for Question Answering\",\n author = \"Hu, Minda and\n Li, Muzhi and\n Wang, Yasheng and\n King, Irwin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.291/\",\n doi = \"10.18653/v1/2022.emnlp-main.291\",\n pages = \"4324--4330\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.291.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.291/", + "pdf_size": 312586, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10299808431015042985&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Dept. of Computer Science & Engineering, The Chinese University of Hong Kong; Dept. of Computer Science & Engineering, The Chinese University of Hong Kong; Huawei Noah\u2019s Ark Lab; Dept. of Computer Science & Engineering, The Chinese University of Hong Kong", + "aff_domain": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;huawei.com;cse.cuhk.edu.hk", + "email": "cse.cuhk.edu.hk;cse.cuhk.edu.hk;huawei.com;cse.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Huawei", + "aff_unique_dep": "Dept. 
of Computer Science & Engineering;Noah\u2019s Ark Lab", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.huawei.com", + "aff_unique_abbr": "CUHK;Huawei", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Hong Kong;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.135", + "title": "MovieUN: A Dataset for Movie Understanding and Narrating", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic movie narration generation and narration grounding are very important to provide a true movie experience for the blind and visually impaired. To tell the movie story well, it is necessary to mention plot-related details (such as character names) and keep the narrations in a plot coherent. Taking these two points into consideration, we construct a Chinese large-scale video benchmark from 101 movies for Movie Understanding and Narrating (MovieUN) to support the Movie Clip Narrating (MCN) task and Temporal Narration Grounding (TNG) task. We split movies in MovieUN into movie clips according to plots, and pair them with corresponding narrations provided by the movie narrators. Ultimately, the TNG task involves 3,253 long video clips totaling 179 hours. The MCN task contains 33,060 video clips totaling 105 hours. We benchmark state-of-the-art video captioning models and temporal grounding models in MCN and TNG tasks, respectively. Furthermore, to accurately comprehend plots of different characters, we propose methods to incorporate portraits of actors as external knowledge in both tasks. The experiment results demonstrate the effectiveness of our proposed methods. 
The dataset and codes are released at https://github.com/yuezih/MovieUN.", + "author": "Qi Zhang; Zihao Yue; Anwen Hu; Ziheng Wang; Qin Jin", + "authorids": "/q/qi-zhang/; /z/zihao-yue/; /a/anwen-hu/; /z/ziheng-wang/; /q/qin-jin/", + "bibtex": "@inproceedings{zhang-etal-2022-movieun,\n title = \"{M}ovie{UN}: A Dataset for Movie Understanding and Narrating\",\n author = \"Zhang, Qi and\n Yue, Zihao and\n Hu, Anwen and\n Wang, Ziheng and\n Jin, Qin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.135/\",\n doi = \"10.18653/v1/2022.findings-emnlp.135\",\n pages = \"1873--1885\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.135.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.135/", + "pdf_size": 1384926, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6694722440934128924&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 2, + "aff": "School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China; School of Information, Renmin University of China", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/yuezih/MovieUN", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Renmin University of China", + "aff_unique_dep": "School of Information", + "aff_unique_url": "http://www.ruc.edu.cn", + "aff_unique_abbr": "RUC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.498", + "title": "MuGER2: Multi-Granularity Evidence Retrieval and Reasoning for Hybrid Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Hybrid question answering (HQA) aims to answer questions over heterogeneous data, including tables and passages linked to table cells. The heterogeneous data can provide different granularity evidence to HQA models, e.t., column, row, cell, and link. Conventional HQA models usually retrieve coarse- or fine-grained evidence to reason the answer. Through comparison, we find that coarse-grained evidence is easier to retrieve but contributes less to the reasoner, while fine-grained evidence is the opposite. To preserve the advantage and eliminate the disadvantage of different granularity evidence, we propose MuGER2, a Multi-Granularity Evidence Retrieval and Reasoning approach. In evidence retrieval, a unified retriever is designed to learn the multi-granularity evidence from the heterogeneous data. In answer reasoning, an evidence selector is proposed to navigate the fine-grained evidence for the answer reader based on the learned multi-granularity evidence. Experiment results on the HybridQA dataset show that MuGER2 significantly boosts the HQA performance. 
Further ablation analysis verifies the effectiveness of both the retrieval and reasoning designs.", + "author": "Yingyao Wang; Junwei Bao; Chaoqun Duan; Youzheng Wu; Xiaodong He; Tiejun Zhao", + "authorids": "/y/yingyao-wang/; /j/junwei-bao/; /c/chaoqun-duan/; /y/youzheng-wu/; /x/xiaodong-he/; /t/tiejun-zhao/", + "bibtex": "@inproceedings{wang-etal-2022-muger2,\n title = \"{M}u{GER}2: Multi-Granularity Evidence Retrieval and Reasoning for Hybrid Question Answering\",\n author = \"Wang, Yingyao and\n Bao, Junwei and\n Duan, Chaoqun and\n Wu, Youzheng and\n He, Xiaodong and\n Zhao, Tiejun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.498/\",\n doi = \"10.18653/v1/2022.findings-emnlp.498\",\n pages = \"6687--6697\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.498.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.498/", + "pdf_size": 880966, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1797643963684299310&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Harbin Institute of Technology; JD AI Research; JD AI Research; JD AI Research; JD AI Research; Harbin Institute of Technology", + "aff_domain": "hit-mtlab.net;gmail.com; ; ; ;hit.edu.cn", + "email": "hit-mtlab.net;gmail.com; ; ; ;hit.edu.cn", + "github": "https://github.com/JD-AI-Research-NLP/MuGER2", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0", + "aff_unique_norm": "Harbin Institute of Technology;JD AI Research", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.jd.com", + "aff_unique_abbr": "HIT;JD AI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": 
"Harbin;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.375", + "title": "MuRAG: Multimodal Retrieval-Augmented Generator for Open Question Answering over Images and Text", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While language Models store a massive amount of world knowledge implicitly in their parameters, even very large models often fail to encode information about rare entities and events, while incurring huge computational costs. Recently, retrieval-augmented models, such as REALM, RAG, and RETRO, have incorporated world knowledge into language generation by leveraging an external non-parametric index and have demonstrated impressive performance with constrained model sizes. However, these methods are restricted to retrieving only textual knowledge, neglecting the ubiquitous amount of knowledge in other modalities like images \u2013 much of which contains information not covered by any text. To address this limitation, we propose the first Multimodal Retrieval-Augmented Transformer (MuRAG), which accesses an external non-parametric multimodal memory to augment language generation. MuRAG is pre-trained with a mixture of large-scale image-text and text-only corpora using a joint contrastive and generative loss. We perform experiments on two different datasets that require retrieving and reasoning over both images and text to answer a given query: WebQA, and MultimodalQA. 
Our results show that MuRAG achieves state-of-the-art accuracy, outperforming existing models by 10-20% absolute on both datasets and under both distractor and full-wiki settings.", + "author": "Wenhu Chen; Hexiang Hu; Xi Chen; Pat Verga; William Cohen", + "authorids": "/w/wenhu-chen/; /h/hexiang-hu/; /x/xi-chen/; /p/pat-verga/; /w/william-cohen/", + "bibtex": "@inproceedings{chen-etal-2022-murag,\n title = \"{M}u{RAG}: Multimodal Retrieval-Augmented Generator for Open Question Answering over Images and Text\",\n author = \"Chen, Wenhu and\n Hu, Hexiang and\n Chen, Xi and\n Verga, Pat and\n Cohen, William\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.375/\",\n doi = \"10.18653/v1/2022.emnlp-main.375\",\n pages = \"5558--5570\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.375.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.375/", + "pdf_size": 3748824, + "gs_citation": 148, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10371260834181714537&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.339", + "title": "Multi-Granularity Optimization for Non-Autoregressive Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite low latency, non-autoregressive machine translation (NAT) suffers severe performance deterioration due to the naive independence assumption. This assumption is further strengthened by cross-entropy loss, which encourages a strict match between the hypothesis and the reference token by token. To alleviate this issue, we propose multi-granularity optimization for NAT, which collects model behaviours on translation segments of various granularities and integrates feedback for backpropagation. Experiments on four WMT benchmarks show that the proposed method significantly outperforms the baseline models trained with cross-entropy loss, and achieves the best performance on WMT\u201916 En\u21d4Ro and highly competitive results on WMT\u201914 En\u21d4De for fully non-autoregressive translation.", + "author": "Yafu Li; Leyang Cui; Yongjing Yin; Yue Zhang", + "authorids": "/y/yafu-li/; /l/leyang-cui/; /y/yongjing-yin/; /y/yue-zhang/", + "bibtex": "@inproceedings{li-etal-2022-multi-granularity,\n title = \"Multi-Granularity Optimization for Non-Autoregressive Translation\",\n author = \"Li, Yafu and\n Cui, Leyang and\n Yin, Yongjing and\n Zhang, Yue\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.339/\",\n doi = \"10.18653/v1/2022.emnlp-main.339\",\n pages = \"5073--5084\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.339.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.339/", + "pdf_size": 660110, + 
"gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8869422901323102897&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Zhejiang University\u2660\u2661; Tencent AI lab\u2663; Zhejiang University\u2660\u2661; School of Engineering, Westlake University\u2661\u2662", + "aff_domain": "gmail.com;tencent.com;westlake.edu.cn;wias.org.cn", + "email": "gmail.com;tencent.com;westlake.edu.cn;wias.org.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Zhejiang University;Tencent;Westlake University", + "aff_unique_dep": ";AI lab;School of Engineering", + "aff_unique_url": "http://www.zju.edu.cn;https://ai.tencent.com;https://www.westlake.edu.cn", + "aff_unique_abbr": "ZJU;Tencent AI lab;WU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.512", + "title": "Multi-Label Intent Detection via Contrastive Task Specialization of Sentence Encoders", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Deploying task-oriented dialog ToD systems for new domains and tasks requires natural language understanding models that are 1) resource-efficient and work under low-data regimes; 2) adaptable, efficient, and quick-to-train; 3) expressive and can handle complex ToD scenarios with multiple user intents in a single utterance. Motivated by these requirements, we introduce a novel framework for multi-label intent detection (mID): MultI-ConvFiT (Multi-Label Intent Detection via Contrastive Conversational Fine-Tuning). 
While previous work on efficient single-label intent detection learns a classifier on top of a fixed sentence encoder (SE), we propose to 1) transform general-purpose SEs into task-specialized SEs via contrastive fine-tuning on annotated multi-label data, 2) where task specialization knowledge can be stored into lightweight adapter modules without updating the original parameters of the input SE, and then 3) we build improved mID classifiers stacked on top of fixed specialized SEs. Our main results indicate that MultI-ConvFiT yields effective mID models, with large gains over non-specialized SEs reported across a spectrum of different mID datasets, both in low-data and high-data regimes.", + "author": "Ivan Vuli\u0107; I\u00f1igo Casanueva; Georgios Spithourakis; Avishek Mondal; Tsung-Hsien Wen; Pawe\u0142 Budzianowski", + "authorids": "/i/ivan-vulic/; /i/inigo-casanueva/; /g/georgios-spithourakis/; /a/avishek-mondal/; /t/tsung-hsien-wen/; /p/pawel-budzianowski/", + "bibtex": "@inproceedings{vulic-etal-2022-multi,\n title = \"Multi-Label Intent Detection via Contrastive Task Specialization of Sentence Encoders\",\n author = \"Vuli{\\'c}, Ivan and\n Casanueva, I{\\~n}igo and\n Spithourakis, Georgios and\n Mondal, Avishek and\n Wen, Tsung-Hsien and\n Budzianowski, Pawe{\\l}\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.512/\",\n doi = \"10.18653/v1/2022.emnlp-main.512\",\n pages = \"7544--7559\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.512.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.512/", + "pdf_size": 1196998, + "gs_citation": 12, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=17519630594458177699&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 0, + "aff": "PolyAI Limited; PolyAI Limited; PolyAI Limited; PolyAI Limited; PolyAI Limited; PolyAI Limited", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "https://poly.ai/modular-intent-design/", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "PolyAI Limited", + "aff_unique_dep": "", + "aff_unique_url": "https://www.poly.ai", + "aff_unique_abbr": "PolyAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.414", + "title": "Multi-Path Transformer is Better: A Case Study on Neural Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "For years the model performance in machine learning obeyed a power-law relationship with the model size. For the consideration of parameter efficiency, recent studies focus on increasing model depth rather than width to achieve better performance. In this paper, we study how model width affects the Transformer model through a parameter-efficient multi-path structure. To better fuse features extracted from different paths, we add three additional operations to each sublayer: a normalization at the end of each path, a cheap operation to produce more features, and a learnable weighted mechanism to fuse all features flexibly. Extensive experiments on 12 WMT machine translation tasks show that, with the same number of parameters, the shallower multi-path model can achieve similar or even better performance than the deeper model. 
It reveals that we should pay more attention to the multi-path structure, and there should be a balance between the model depth and width to train a better large-scale Transformer.", + "author": "Ye Lin; Shuhan Zhou; Yanyang Li; Anxiang Ma; Tong Xiao; Jingbo Zhu", + "authorids": "/y/ye-lin/; /s/shuhan-zhou/; /y/yanyang-li/; /a/anxiang-ma/; /t/tong-xiao/; /j/jingbo-zhu/", + "bibtex": "@inproceedings{lin-etal-2022-multi-path,\n title = \"Multi-Path Transformer is Better: A Case Study on Neural Machine Translation\",\n author = \"Lin, Ye and\n Zhou, Shuhan and\n Li, Yanyang and\n Ma, Anxiang and\n Xiao, Tong and\n Zhu, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.414/\",\n doi = \"10.18653/v1/2022.findings-emnlp.414\",\n pages = \"5646--5656\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.414.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.414/", + "pdf_size": 318606, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5432939734818397285&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China; The Chinese University of Hong Kong, Hong Kong, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, China; NLP Lab, School of Computer Science and Engineering, Northeastern University, Shenyang, China + NiuTrans Research, Shenyang, 
China", + "aff_domain": "outlook.com;outlook.com;outlook.com;mail.neu.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "email": "outlook.com;outlook.com;outlook.com;mail.neu.edu.cn;mail.neu.edu.cn;mail.neu.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;0+2;0+2", + "aff_unique_norm": "Northeastern University;The Chinese University of Hong Kong;NiuTrans Research", + "aff_unique_dep": "School of Computer Science and Engineering;;", + "aff_unique_url": "http://www.neu.edu.cn/;https://www.cuhk.edu.hk;", + "aff_unique_abbr": "NEU;CUHK;", + "aff_campus_unique_index": "0;0;1;0;0;0", + "aff_campus_unique": "Shenyang;Hong Kong;", + "aff_country_unique_index": "0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.19", + "title": "Multi-Tenant Optimization For Few-Shot Task-Oriented FAQ Retrieval", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Business-specific Frequently Asked Questions (FAQ) retrieval in task-oriented dialog systems poses unique challenges vis \u00e0 vis community based FAQs. Each FAQ question represents an intent which is usually an umbrella term for many related user queries. We evaluate performance for such Business FAQs both with standard FAQ retrieval techniques using query-Question (q-Q) similarity and few-shot intent detection techniques. Implementing a real-world solution for FAQ retrieval in order to support multiple tenants (FAQ sets) entails optimizing speed, accuracy and cost. 
We propose a novel approach to scale multi-tenant FAQ applications in real-world context by contrastive fine-tuning of the last layer in sentence Bi-Encoders along with tenant-specific weight switching.", + "author": "Asha Vishwanathan; Rajeev Warrier; Gautham Vadakkekara Suresh; Chandra Shekhar Kandpal", + "authorids": "/a/asha-vishwanathan/; /r/rajeev-warrier/; /g/gautham-vadakkekara-suresh/; /c/chandra-shekhar-kandpal/", + "bibtex": "@inproceedings{vishwanathan-etal-2022-multi,\n title = \"Multi-Tenant Optimization For Few-Shot Task-Oriented {FAQ} Retrieval\",\n author = \"Vishwanathan, Asha and\n Warrier, Rajeev and\n Vadakkekara Suresh, Gautham and\n Kandpal, Chandra Shekhar\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.19/\",\n doi = \"10.18653/v1/2022.emnlp-industry.19\",\n pages = \"188--197\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.19.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.19/", + "pdf_size": 325438, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16949655455679954793&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Verloop.io; Verloop.io; Verloop.io; Verloop.io", + "aff_domain": "verloop.io;verloop.io;yahoo.com;gmail.com", + "email": "verloop.io;verloop.io;yahoo.com;gmail.com", + "github": "https://github.com/verloop/few-shot-faqiran", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Verloop.io", + "aff_unique_dep": "", + "aff_unique_url": "https://www.verloop.io", + "aff_unique_abbr": "Verloop", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "Unknown" + }, + { + "id": "2022.emnlp-main.19", + "title": "Multi-VQG: Generating Engaging Questions for Multiple Images", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Generating engaging content has drawn much recent attention in the NLP community. Asking questions is a natural way to respond to photos and promote awareness. However, most answers to questions in traditional question-answering (QA) datasets are factoids, which reduce individuals\u2019 willingness to answer. Furthermore, traditional visual question generation (VQG) confines the source data for question generation to single images, resulting in a limited ability to comprehend time-series information of the underlying event. In this paper, we propose generating engaging questions from multiple images. We present MVQG, a new dataset, and establish a series of baselines, including both end-to-end and dual-stage architectures. Results show that building stories behind the image sequence enables models togenerate engaging questions, which confirms our assumption that people typically construct a picture of the event in their minds before asking questions. 
These results open up an exciting challenge for visual-and-language models to implicitly construct a story behind a series of photos to allow for creativity and experience sharing and hence draw attention to downstream applications.", + "author": "Min-Hsuan Yeh; Vincent Chen; Ting-Hao Huang; Lun-Wei Ku", + "authorids": "/m/min-hsuan-yeh/; /v/vincent-chen/; /t/ting-hao-huang/; /l/lun-wei-ku/", + "bibtex": "@inproceedings{yeh-etal-2022-multi,\n title = \"Multi-{VQG}: Generating Engaging Questions for Multiple Images\",\n author = \"Yeh, Min-Hsuan and\n Chen, Vincent and\n Huang, Ting-Hao and\n Ku, Lun-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.19/\",\n doi = \"10.18653/v1/2022.emnlp-main.19\",\n pages = \"277--290\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.19.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.19/", + "pdf_size": 1286524, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4851982330615170457&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Massachusetts Amherst; University of Illinois Urbana-Champaign; Pennsylvania State University; Institute of Information Science, Academia Sinica", + "aff_domain": "umass.edu;illinois.edu;psu.edu;iis.sinica.edu", + "email": "umass.edu;illinois.edu;psu.edu;iis.sinica.edu", + "github": "https://github.com/AcademiaSinicaNLPLab/MVQG-Dataset-of-Generating-Engaging-Questions-for-Multiple-Images", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Massachusetts Amherst;University of Illinois at Urbana-Champaign;Pennsylvania State University;Academia 
Sinica", + "aff_unique_dep": ";;;Institute of Information Science", + "aff_unique_url": "https://www.umass.edu;https://illinois.edu;https://www.psu.edu;https://www.sinica.edu.tw", + "aff_unique_abbr": "UMass Amherst;UIUC;PSU;AS", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Amherst;Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;Taiwan, China" + }, + { + "id": "2022.findings-emnlp.481", + "title": "Multi-View Active Learning for Short Text Classification in User-Generated Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Mining user-generated data often suffers from the lack of enough labeled data, short document lengths, and the informal user language. In this paper, we propose a novel active learning model to overcome these obstacles in the tasks tailored for query phrases\u2013e.g., detecting positive reports of natural disasters. Our model has three novelties: 1) It is the first approach to employ multi-view active learning in this domain. 2) It uses the Parzen-Rosenblatt window method to integrate the representativeness measure into multi-view active learning. 3) It employs a query-by-committee strategy, based on the agreement between predictors, to address the usually noisy language of the documents in this domain. We evaluate our model in four publicly available Twitter datasets with distinctly different applications. We also compare our model with a wide range of baselines including those with multiple classifiers. 
The experiments testify that our model is highly consistent and outperforms existing models.", + "author": "Payam Karisani; Negin Karisani; Li Xiong", + "authorids": "/p/payam-karisani/; /n/negin-karisani/; /l/li-xiong/", + "bibtex": "@inproceedings{karisani-etal-2022-multi,\n title = \"Multi-View Active Learning for Short Text Classification in User-Generated Data\",\n author = \"Karisani, Payam and\n Karisani, Negin and\n Xiong, Li\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.481/\",\n doi = \"10.18653/v1/2022.findings-emnlp.481\",\n pages = \"6441--6453\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.481.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.481/", + "pdf_size": 2224180, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18003074768762268877&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Emory University; Purdue University; Emory University", + "aff_domain": "emory.edu;purdue.edu;emory.edu", + "email": "emory.edu;purdue.edu;emory.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Emory University;Purdue University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.emory.edu;https://www.purdue.edu", + "aff_unique_abbr": "Emory;Purdue", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.79", + "title": "Multi-View Reasoning: Consistent Contrastive Learning for Math Word Problem", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Math word problem 
solver requires both precise relation reasoning about quantities in the text and reliable generation for the diverse equation. Current sequence-to-tree or relation extraction methods regard this only from a fixed view, struggling to simultaneously handle complex semantics and diverse equations. However, human solving naturally involves two consistent reasoning views: top-down and bottom-up, just as math equations also can be expressed in multiple equivalent forms: pre-order and post-order. We propose a multi-view consistent contrastive learning for a more complete semantics-to-equation mapping. The entire process is decoupled into two independent but consistent views: top-down decomposition and bottom-up construction, and the two reasoning views are aligned in multi-granularity for consistency, enhancing global generation and precise reasoning. Experiments on multiple datasets across two languages show our approach significantly outperforms the existing baselines, especially on complex problems. 
We also show after consistent alignment, multi-view can absorb the merits of both views and generate more diverse results consistent with the mathematical laws.", + "author": "Wenqi Zhang; Yongliang Shen; Yanna Ma; Xiaoxia Cheng; Zeqi Tan; Qingpeng Nong; Weiming Lu", + "authorids": "/w/wenqi-zhang/; /y/yongliang-shen/; /y/yanna-ma/; /x/xiaoxia-cheng/; /z/zeqi-tan/; /q/qingpeng-nong/; /w/weiming-lu/", + "bibtex": "@inproceedings{zhang-etal-2022-multi-view,\n title = \"Multi-View Reasoning: Consistent Contrastive Learning for Math Word Problem\",\n author = \"Zhang, Wenqi and\n Shen, Yongliang and\n Ma, Yanna and\n Cheng, Xiaoxia and\n Tan, Zeqi and\n Nong, Qingpeng and\n Lu, Weiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.79/\",\n doi = \"10.18653/v1/2022.findings-emnlp.79\",\n pages = \"1103--1116\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.79.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.79/", + "pdf_size": 1724086, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7724514052140558872&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; University of Shanghai for Science and Technology; College of Computer Science and Technology, Zhejiang University; College of Computer Science and Technology, Zhejiang University; Zhongxing Telecommunication Equipment Corporation; College of Computer Science and Technology, Zhejiang University", + "aff_domain": "zju.edu.cn; ; ; ; ; ;zju.edu.cn", + "email": "zju.edu.cn; ; ; ; ; ;zju.edu.cn", + "github": 
"https://github.com/zwq2018/Multi-view-Consistency-for-MWP", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;2;0", + "aff_unique_norm": "Zhejiang University;University of Shanghai for Science and Technology;Zhongxing Telecommunication Equipment Corporation", + "aff_unique_dep": "College of Computer Science and Technology;;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.usst.edu.cn;http://www.zte.com.cn", + "aff_unique_abbr": "ZJU;USST;ZTE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.202", + "title": "Multi-level Distillation of Semantic Knowledge for Pre-training Multilingual Language Model", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained multilingual language models play an important role in cross-lingual natural language understanding tasks. However, existing methods did not focus on learning the semantic structure of representation, and thus could not optimize their performance. In this paper, we propose Multi-level Multilingual Knowledge Distillation (MMKD), a novel method for improving multilingual language models. Specifically, we employ a teacher-student framework to adopt rich semantic representation knowledge in English BERT. We propose token-, word-, sentence-, and structure-level alignment objectives to encourage multiple levels of consistency between source-target pairs and correlation similarity between teacher and student models. We conduct experiments on cross-lingual evaluation benchmarks including XNLI, PAWS-X, and XQuAD. Experimental results show that MMKD outperforms other baseline models of similar size on XNLI and XQuAD and obtains comparable performance on PAWS-X. 
Especially, MMKD obtains significant performance gains on low-resource languages.", + "author": "Mingqi Li; Fei Ding; Dan Zhang; Long Cheng; Hongxin Hu; Feng Luo", + "authorids": "/m/mingqi-li/; /f/fei-ding/; /d/dan-zhang/; /l/long-cheng/; /h/hongxin-hu/; /f/feng-luo/", + "bibtex": "@inproceedings{li-etal-2022-multi-level,\n title = \"Multi-level Distillation of Semantic Knowledge for Pre-training Multilingual Language Model\",\n author = \"Li, Mingqi and\n Ding, Fei and\n Zhang, Dan and\n Cheng, Long and\n Hu, Hongxin and\n Luo, Feng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.202/\",\n doi = \"10.18653/v1/2022.emnlp-main.202\",\n pages = \"3097--3106\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.202.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.202/", + "pdf_size": 373078, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=645854838708891494&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Clemson University; Clemson University; Clemson University; Clemson University; University at Buffalo; Clemson University", + "aff_domain": "clemson.edu;clemson.edu;clemson.edu;clemson.edu;buffalo.edu;clemson.edu", + "email": "clemson.edu;clemson.edu;clemson.edu;clemson.edu;buffalo.edu;clemson.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Clemson University;University at Buffalo", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.clemson.edu;https://www.buffalo.edu", + "aff_unique_abbr": "Clemson;UB", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.77", + "title": "Multilingual Machine Translation with Hyper-Adapters", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multilingual machine translation suffers from negative interference across languages. A common solution is to relax parameter sharing with language-specific modules like adapters. However, adapters of related languages are unable to transfer information, and their total number of parameters becomes prohibitively expensive as the number of languages grows. In this work, we overcome these drawbacks using hyper-adapters \u2013 hyper-networks that generate adapters from language and layer embeddings. While past work had poor results when scaling hyper-networks, we propose a rescaling fix that significantly improves convergence and enables training larger hyper-networks. We find that hyper-adapters are more parameter efficient than regular adapters, reaching the same performance with up to 12 times less parameters. When using the same number of parameters and FLOPS, our approach consistently outperforms regular adapters. Also, hyper-adapters converge faster than alternative approaches and scale better than regular dense networks. 
Our analysis shows that hyper-adapters learn to encode language relatedness, enabling positive transfer across languages.", + "author": "Christos Baziotis; Mikel Artetxe; James Cross; Shruti Bhosale", + "authorids": "/c/christos-baziotis/; /m/mikel-artetxe/; /j/james-cross/; /s/shruti-bhosale/", + "bibtex": "@inproceedings{baziotis-etal-2022-multilingual,\n title = \"Multilingual Machine Translation with Hyper-Adapters\",\n author = \"Baziotis, Christos and\n Artetxe, Mikel and\n Cross, James and\n Bhosale, Shruti\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.77/\",\n doi = \"10.18653/v1/2022.emnlp-main.77\",\n pages = \"1170--1185\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.77.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.77/", + "pdf_size": 496457, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4597100152581820629&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Institute for Language, Cognition and Computation, School of Informatics, University of Edinburgh + Meta AI Research, Menlo Park, CA, USA; Meta AI Research, Menlo Park, CA, USA; Meta AI Research, Menlo Park, CA, USA; Meta AI Research, Menlo Park, CA, USA", + "aff_domain": "ed.ac.uk; ; ;fb.com", + "email": "ed.ac.uk; ; ;fb.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;1", + "aff_unique_norm": "University of Edinburgh;Meta AI Research", + "aff_unique_dep": "School of Informatics;AI Research", + "aff_unique_url": "https://www.ed.ac.uk;https://meta.ai", + "aff_unique_abbr": "Edinburgh;Meta AI", + "aff_campus_unique_index": "0+1;1;1;1", + "aff_campus_unique": 
"Edinburgh;Menlo Park", + "aff_country_unique_index": "0+1;1;1;1", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.308", + "title": "Multilingual Multimodal Learning with Machine Translated Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Most vision-and-language pretraining research focuses on English tasks. However, the creation of multilingual multimodal evaluation datasets (e.g. Multi30K, xGQA, XVNLI, and MaRVL) poses a new challenge in finding high-quality training data that is both multilingual and multimodal. In this paper, we investigate whether machine translating English multimodal data can be an effective proxy for the lack of readily available multilingual data. We call this framework TD-MML: Translated Data for Multilingual Multimodal Learning, and it can be applied to any multimodal dataset and model. We apply it to both pretraining and fine-tuning data with a state-of-the-art model. In order to prevent models from learning from low-quality translated text, we propose two metrics for automatically removing such translations from the resulting datasets. 
In experiments on five tasks across 20 languages in the IGLUE benchmark, we show that translated data can provide a useful signal for multilingual multimodal learning, both at pretraining and fine-tuning.", + "author": "Chen Qiu; Dan Onea\u021b\u0103; Emanuele Bugliarello; Stella Frank; Desmond Elliott", + "authorids": "/c/chen-qiu/; /d/dan-oneata/; /e/emanuele-bugliarello/; /s/stella-frank/; /d/desmond-elliott/", + "bibtex": "@inproceedings{qiu-etal-2022-multilingual,\n title = \"Multilingual Multimodal Learning with Machine Translated Text\",\n author = \"Qiu, Chen and\n Onea\u021b{\\u{a}}, Dan and\n Bugliarello, Emanuele and\n Frank, Stella and\n Elliott, Desmond\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.308/\",\n doi = \"10.18653/v1/2022.findings-emnlp.308\",\n pages = \"4178--4193\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.308.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.308/", + "pdf_size": 1952279, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5163155426879396399&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Technology, Wuhan University of Science and Technology, China; University Politehnica of Bucharest, Romania; Department of Computer Science, University of Copenhagen, Denmark + Pioneer Centre for AI, Denmark; Department of Computer Science, University of Copenhagen, Denmark + Pioneer Centre for AI, Denmark; Department of Computer Science, University of Copenhagen, Denmark + Pioneer Centre for AI, Denmark", + "aff_domain": "wust.edu.cn;speed.pub.ro;di.ku.dk;di.ku.dk;di.ku.dk", + "email": 
"wust.edu.cn;speed.pub.ro;di.ku.dk;di.ku.dk;di.ku.dk", + "github": "https://github.com/danoneata/td-mml", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2+3;2+3;2+3", + "aff_unique_norm": "Wuhan University of Science and Technology;University Politehnica of Bucharest;University of Copenhagen;Pioneer Centre for AI", + "aff_unique_dep": "School of Computer Science and Technology;;Department of Computer Science;", + "aff_unique_url": "http://www.wust.edu.cn;https://www.upb.ro;https://www.ku.dk;", + "aff_unique_abbr": "WUST;UPB;UCPH;", + "aff_campus_unique_index": "0;;;", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;1;2+2;2+2;2+2", + "aff_country_unique": "China;Romania;Denmark" + }, + { + "id": "2022.emnlp-main.69", + "title": "Multilingual Relation Classification via Efficient and Effective Prompting", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompting pre-trained language models has achieved impressive performance on various NLP tasks, especially in low data regimes. Despite the success of prompting in monolingual settings, applying prompt-based methods in multilingual scenarios has been limited to a narrow set of tasks, due to the high cost of handcrafting multilingual prompts. In this paper, we present the first work on prompt-based multilingual relation classification (RC), by introducing an efficient and effective method that constructs prompts from relation triples and involves only minimal translation for the class labels. We evaluate its performance in fully supervised, few-shot and zero-shot scenarios, and analyze its effectiveness across 14 languages, prompt variants, and English-task training in cross-lingual settings. We find that in both fully supervised and few-shot scenarios, our prompt method beats competitive baselines: fine-tuning XLM-R_EM and null prompts. It also outperforms the random baseline by a large margin in zero-shot experiments. 
Our method requires little in-language knowledge and can be used as a strong baseline for similar multilingual classification tasks.", + "author": "Yuxuan Chen; David Harbecke; Leonhard Hennig", + "authorids": "/y/yuxuan-chen/; /d/david-harbecke/; /l/leonhard-hennig/", + "bibtex": "@inproceedings{chen-etal-2022-multilingual,\n title = \"Multilingual Relation Classification via Efficient and Effective Prompting\",\n author = \"Chen, Yuxuan and\n Harbecke, David and\n Hennig, Leonhard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.69/\",\n doi = \"10.18653/v1/2022.emnlp-main.69\",\n pages = \"1059--1075\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.69.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.69/", + "pdf_size": 2692450, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11832159124136265492&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "German Research Center for Artificial Intelligence (DFKI) Speech and Language Technology Lab; German Research Center for Artificial Intelligence (DFKI) Speech and Language Technology Lab; German Research Center for Artificial Intelligence (DFKI) Speech and Language Technology Lab", + "aff_domain": "dfki.de;dfki.de;dfki.de", + "email": "dfki.de;dfki.de;dfki.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "German Research Center for Artificial Intelligence", + "aff_unique_dep": "Speech and Language Technology Lab", + "aff_unique_url": "https://www.dFKI.de", + "aff_unique_abbr": "DFKI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.215", + "title": "Multilingual Sentence Transformer as A Multilingual Word Aligner", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multilingual pretrained language models (mPLMs) have shown their effectiveness in multilingual word alignment induction. However, these methods usually start from mBERT or XLM-R. In this paper, we investigate whether multilingual sentence Transformer LaBSE is a strong multilingual word aligner. This idea is non-trivial as LaBSE is trained to learn language-agnostic sentence-level embeddings, while the alignment extraction task requires the more fine-grained word-level embeddings to be language-agnostic. We demonstrate that the vanilla LaBSE outperforms other mPLMs currently used in the alignment task, and then propose to finetune LaBSE on parallel corpus for further improvement. Experiment results on seven language pairs show that our best aligner outperforms previous state-of-the-art models of all varieties. 
In addition, our aligner supports different language pairs in a single model, and even achieves new state-of-the-art on zero-shot language pairs that does not appear in the finetuning process.", + "author": "Weikang Wang; Guanhua Chen; Hanqing Wang; Yue Han; Yun Chen", + "authorids": "/w/weikang-wang/; /g/guanhua-chen/; /h/hanqing-wang/; /y/yue-han/; /y/yun-chen/", + "bibtex": "@inproceedings{wang-etal-2022-multilingual,\n title = \"Multilingual Sentence Transformer as A Multilingual Word Aligner\",\n author = \"Wang, Weikang and\n Chen, Guanhua and\n Wang, Hanqing and\n Han, Yue and\n Chen, Yun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.215/\",\n doi = \"10.18653/v1/2022.findings-emnlp.215\",\n pages = \"2952--2963\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.215.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.215/", + "pdf_size": 1995360, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10721747092538328472&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Shanghai University of Finance and Economics; Southern University of Science and Technology; Shanghai University of Finance and Economics; Shanghai University of Finance and Economics; Shanghai University of Finance and Economics", + "aff_domain": "163.sufe.edu.cn;gmail.com;163.sufe.edu.cn;163.sufe.edu.cn;sufe.edu.cn", + "email": "163.sufe.edu.cn;gmail.com;163.sufe.edu.cn;163.sufe.edu.cn;sufe.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Shanghai University of Finance and Economics;Southern University of Science and Technology", + 
"aff_unique_dep": ";", + "aff_unique_url": "http://www.sufe.edu.cn;https://www.sustech.edu.cn", + "aff_unique_abbr": "SUFE;SUSTech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.407", + "title": "Multilingual SubEvent Relation Extraction: A Novel Dataset and Structure Induction Method", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Subevent Relation Extraction (SRE) is a task in Information Extraction that aims to recognize spatial and temporal containment relations between event mentions in text. Recent methods have utilized pre-trained language models to represent input texts for SRE. However, a key issue in existing SRE methods is the employment of sequential order of words in texts to feed into representation learning methods, thus unable to explicitly focus on important context words and their interactions to enhance representations. In this work, we introduce a new method for SRE that learns to induce effective graph structures for input texts to boost representation learning. Our method features a word alignment framework with dependency paths and optimal transport to identify important context words to form effective graph structures for SRE. In addition, to enable SRE research on non-English languages, we present a new multilingual SRE dataset for five typologically different languages. 
Extensive experiments reveal the state-of-the-art performance for our method on different datasets and languages.", + "author": "Viet Lai; Hieu Man; Linh Ngo; Franck Dernoncourt; Thien Nguyen", + "authorids": "/v/viet-lai/; /h/hieu-man/; /l/linh-ngo/; /f/franck-dernoncourt/; /t/thien-nguyen/", + "bibtex": "@inproceedings{lai-etal-2022-multilingual-subevent,\n title = \"Multilingual {S}ub{E}vent Relation Extraction: A Novel Dataset and Structure Induction Method\",\n author = \"Lai, Viet and\n Man, Hieu and\n Ngo, Linh and\n Dernoncourt, Franck and\n Nguyen, Thien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.407/\",\n doi = \"10.18653/v1/2022.findings-emnlp.407\",\n pages = \"5559--5570\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.407.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.407/", + "pdf_size": 296177, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15813326457159568372&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Department of Computer Science, University of Oregon, Eugene, Oregon, USA; VinAI Research, Vietnam; Hanoi University of Science and Technology, Hanoi, Vietnam; Adobe Research, Seattle, WA, USA; Department of Computer Science, University of Oregon, Eugene, Oregon, USA", + "aff_domain": "cs.uoregon.edu;cs.uoregon.edu;vinai.io;soict.hust.edu.vn;adobe.com", + "email": "cs.uoregon.edu;cs.uoregon.edu;vinai.io;soict.hust.edu.vn;adobe.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "University of Oregon;VinAI Research;Hanoi University of Science and Technology;Adobe Research", + 
"aff_unique_dep": "Department of Computer Science;;;", + "aff_unique_url": "https://www.uoregon.edu;https://www.vin.ai;https://www.hust.edu.vn;https://research.adobe.com", + "aff_unique_abbr": "UO;VinAI;HUST;Adobe", + "aff_campus_unique_index": "0;2;3;0", + "aff_campus_unique": "Eugene;;Hanoi;Seattle", + "aff_country_unique_index": "0;1;1;0;0", + "aff_country_unique": "United States;Vietnam" + }, + { + "id": "2022.emnlp-industry.42", + "title": "Multimodal Context Carryover", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Multi-modality support has become an integral part of creating a seamless user experience with modern voice assistants with smart displays. Users refer to images, video thumbnails, or the accompanying text descriptions on the screen through voice communication with AI powered devices. This raises the need to either augment existing commercial voice only dialogue systems with state-of-the-art multimodal components, or to introduce entirely new architectures; where the latter can lead to costly system revamps. To support the emerging visual navigation and visual product selection use cases, we propose to augment commercially deployed voice-only dialogue systems with additional multi-modal components. In this work, we present a novel yet pragmatic approach to expand an existing dialogue-based context carryover system (Chen et al., 2019a) in a voice assistant with state-of-the-art multimodal components to facilitate quick delivery of visual modality support with minimum changes. 
We demonstrate a 35% accuracy improvement over the existing system on an in-house multi-modal visual navigation data set.", + "author": "Prashan Wanigasekara; Nalin Gupta; Fan Yang; Emre Barut; Zeynab Raeesy; Kechen Qin; Stephen Rawls; Xinyue Liu; Chengwei Su; Spurthi Sandiri", + "authorids": "/p/prashan-wanigasekara/; /n/nalin-gupta/; /f/fan-yang/; /e/emre-barut/; /z/zeynab-raeesy/; /k/kechen-qin/; /s/stephen-rawls/; /x/xinyue-liu/; /c/chengwei-su/; /s/spurthi-sandiri/", + "bibtex": "@inproceedings{wanigasekara-etal-2022-multimodal,\n title = \"Multimodal Context Carryover\",\n author = \"Wanigasekara, Prashan and\n Gupta, Nalin and\n Yang, Fan and\n Barut, Emre and\n Raeesy, Zeynab and\n Qin, Kechen and\n Rawls, Stephen and\n Liu, Xinyue and\n Su, Chengwei and\n Sandiri, Spurthi\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.42/\",\n doi = \"10.18653/v1/2022.emnlp-industry.42\",\n pages = \"417--428\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.42.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.42/", + "pdf_size": 3388181, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4407461008224137589&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon; Alexa AI-Natural Understanding, Amazon", + "aff_domain": 
"amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Alexa AI-Natural Understanding", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.36", + "title": "Multimodal Contrastive Learning via Uni-Modal Coding and Cross-Modal Prediction for Multimodal Sentiment Analysis", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multimodal representation learning is a challenging task in which previous work mostly focus on either uni-modality pre-training or cross-modality fusion. In fact, we regard modeling multimodal representation as building a skyscraper, where laying stable foundation and designing the main structure are equally essential. The former is like encoding robust uni-modal representation while the later is like integrating interactive information among different modalities, both of which are critical to learning an effective multimodal representation. Recently, contrastive learning has been successfully applied in representation learning, which can be utilized as the pillar of the skyscraper and benefit the model to extract the most important features contained in the multimodal data. In this paper, we propose a novel framework named MultiModal Contrastive Learning (MMCL) for multimodal representation to capture intra- and inter-modality dynamics simultaneously. 
Specifically, we devise uni-modal contrastive coding with an efficient uni-modal feature augmentation strategy to filter inherent noise contained in acoustic and visual modality and acquire more robust uni-modality representations. Besides, a pseudo siamese network is presented to predict representation across different modalities, which successfully captures cross-modal dynamics. Moreover, we design two contrastive learning tasks, instance- and sentiment-based contrastive learning, to promote the process of prediction and learn more interactive information related to sentiment. Extensive experiments conducted on two public datasets demonstrate that our method surpasses the state-of-the-art methods.", + "author": "Ronghao Lin; Haifeng Hu", + "authorids": "/r/ronghao-lin/; /h/haifeng-hu/", + "bibtex": "@inproceedings{lin-hu-2022-multimodal,\n title = \"Multimodal Contrastive Learning via Uni-Modal Coding and Cross-Modal Prediction for Multimodal Sentiment Analysis\",\n author = \"Lin, Ronghao and\n Hu, Haifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.36/\",\n doi = \"10.18653/v1/2022.findings-emnlp.36\",\n pages = \"511--523\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.36.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.36/", + "pdf_size": 1019342, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9804783200591448919&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Sun Yat-sen University, China; Sun Yat-sen University, China", + "aff_domain": "mail2.sysu.edu.cn;mail.sysu.edu.cn", + "email": "mail2.sysu.edu.cn;mail.sysu.edu.cn", + "github": "", + "project": "", + 
"author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Sun Yat-sen University", + "aff_unique_dep": "", + "aff_unique_url": "http://www.sysu.edu.cn", + "aff_unique_abbr": "SYSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.376", + "title": "Multimodal Conversation Modelling for Topic Derailment Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Conversations on social media tend to go off-topic and turn into different and sometimes toxic exchanges. Previous work focuses on analysing textual dialogues that have derailed into toxic content, but the range of derailment types is much broader, including spam or bot content, tangential comments, etc. In addition, existing work disregards conversations that involve visual information (i.e. images or videos), which are prevalent on most platforms. In this paper, we take a broader view of conversation derailment and propose a new challenge: detecting derailment based on the \u201cchange of conversation topic\u201d, where the topic is defined by an initial post containing both a text and an image. For that, we (i) create the first Multimodal Conversation Derailment (MCD) dataset, and (ii) introduce a new multimodal conversational architecture (MMConv) that utilises visual and conversational contexts to classify comments for derailment. Experiments show that MMConv substantially outperforms previous text-based approaches to detect conversation derailment, as well as general multimodal classifiers. 
MMConv is also more robust to textual noise, since it relies on richer contextual information.", + "author": "Zhenhao Li; Marek Rei; Lucia Specia", + "authorids": "/z/zhenhao-li/; /m/marek-rei/; /l/lucia-specia/", + "bibtex": "@inproceedings{li-etal-2022-multimodal,\n title = \"Multimodal Conversation Modelling for Topic Derailment Detection\",\n author = \"Li, Zhenhao and\n Rei, Marek and\n Specia, Lucia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.376/\",\n doi = \"10.18653/v1/2022.findings-emnlp.376\",\n pages = \"5115--5127\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.376.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.376/", + "pdf_size": 3439483, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13695877978551270312&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/Nickeilf/Multimodal-Conversation-Derailment", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.230", + "title": "Multimodal Knowledge Learning for Named Entity Disambiguation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "With the popularity of online social media, massive-scale multimodal information has brought new challenges to traditional Named Entity Disambiguation (NED) tasks. Recently, Multimodal Named Entity Disambiguation (MNED) has been proposed to link ambiguous mentions with the textual and visual contexts to a predefined knowledge graph. 
Existing attempts usually perform MNED by annotating multimodal mentions and adding multimodal features to traditional NED models. However, these studies may suffer from 1) failing to model multimodal information at the knowledge level, and 2) lacking multimodal annotation data against the large-scale unlabeled corpus. In this paper, we explore a pioneer study on leveraging multimodal knowledge learning to address the MNED task. Specifically, we first harvest multimodal knowledge in the Meta-Learning way, which is much easier than collecting ambiguous mention corpus. Then we design a knowledge-guided transfer learning strategy to extract unified representation from different modalities. Finally, we propose an Interactive Multimodal Learning Network (IMN) to fully utilize the multimodal information on both the mention and knowledge sides. Extensive experiments conducted on two public MNED datasets demonstrate that the proposed method achieves improvements over the state-of-the-art multimodal methods.", + "author": "Zhang Dongjie; Longtao Huang", + "authorids": "/z/zhang-dongjie/; /l/longtao-huang/", + "bibtex": "@inproceedings{dongjie-huang-2022-multimodal,\n title = \"Multimodal Knowledge Learning for Named Entity Disambiguation\",\n author = \"Dongjie, Zhang and\n Huang, Longtao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.230/\",\n doi = \"10.18653/v1/2022.findings-emnlp.230\",\n pages = \"3160--3169\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.230.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.230/", + "pdf_size": 1839867, + "gs_citation": 12, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=2115087664570697977&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.582", + "title": "Multimodal Robustness for Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we look at the case of a Generic text-to-text NMT model that has to deal with data coming from various modalities, like speech, images, or noisy text extracted from the web. We propose a two-step method, based on composable adapters, to deal with this problem of Multimodal Robustness. In a first step, we separately learn domain adapters and modality specific adapters, to deal with noisy input coming from various sources: ASR, OCR, or noisy text (UGC). In a second step, we combine these components at runtime via dynamic routing or, when the source of noise is unknown, via two new transfer learning mechanisms (Fast Fusion and Multi Fusion). 
We show that our method provides a flexible, state-of-the-art, architecture able to deal with noisy multimodal inputs.", + "author": "Yuting Zhao; Ioan Calapodescu", + "authorids": "/y/yuting-zhao/; /i/ioan-calapodescu/", + "bibtex": "@inproceedings{zhao-calapodescu-2022-multimodal,\n title = \"Multimodal Robustness for Neural Machine Translation\",\n author = \"Zhao, Yuting and\n Calapodescu, Ioan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.582/\",\n doi = \"10.18653/v1/2022.emnlp-main.582\",\n pages = \"8505--8516\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.582.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.582/", + "pdf_size": 626907, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=200925128827770763&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Tokyo Metropolitan University + Naver Labs Europe; Naver Labs Europe", + "aff_domain": "ed.tmu.ac.jp;naverlabs.com", + "email": "ed.tmu.ac.jp;naverlabs.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Tokyo Metropolitan University;Naver Labs", + "aff_unique_dep": ";Naver Labs Europe", + "aff_unique_url": "https://www.tmu.ac.jp;https://labs.naver.com", + "aff_unique_abbr": "TMU;NLE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "Japan;Europe" + }, + { + "id": "2022.findings-emnlp.546", + "title": "Multiple Instance Learning for Offensive Language Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic offensive language detection has 
become a crucial issue in recent years. Existing researches on this topic are usually based on a large amount of data annotated at sentence level to train a robust model. However, sentence-level annotations are expensive in practice as the scenario expands, while there exist a large amount of natural labels from historical information on online platforms such as reports and punishments. Notably, these natural labels are usually in bag-level corresponding to the whole documents (articles, user profiles, conversations, etc.). Therefore, we target at proposing an approach capable of utilizing the bag-level labeled data for offensive language detection in this study. For this purpose, we formalize this task into a multiple instance learning (MIL) problem. We break down the design of existing MIL methods and propose a hybrid fusion MIL model with mutual-attention mechanism. In order to verify the validity of the proposed method, we present two new bag-level labeled datasets for offensive language detection: OLID-bags and MINOR. 
Experimental results based on the proposed datasets demonstrate the effectiveness of the mutual-attention method at both sentence level and bag level.", + "author": "Jiexi Liu; Dehan Kong; Longtao Huang; Dinghui Mao; Hui Xue", + "authorids": "/j/jiexi-liu/; /d/dehan-kong/; /l/longtao-huang/; /d/dinghui-mao/; /h/hui-xue/", + "bibtex": "@inproceedings{liu-etal-2022-multiple,\n title = \"Multiple Instance Learning for Offensive Language Detection\",\n author = \"Liu, Jiexi and\n Kong, Dehan and\n Huang, Longtao and\n Mao, Dinghui and\n Xue, Hui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.546/\",\n doi = \"10.18653/v1/2022.findings-emnlp.546\",\n pages = \"7387--7396\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.546.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.546/", + "pdf_size": 2066364, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4050468795559435391&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.560", + 
"title": "Multitask Instruction-based Prompting for Fallacy Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fallacies are used as seemingly valid arguments to support a position and persuade the audience about its validity. Recognizing fallacies is an intrinsically difficult task both for humans and machines. Moreover, a big challenge for computational models lies in the fact that fallacies are formulated differently across the datasets with differences in the input format (e.g., question-answer pair, sentence with fallacy fragment), genre (e.g., social media, dialogue, news), as well as types and number of fallacies (from 5 to 18 types per dataset). To move towards solving the fallacy recognition task, we approach these differences across datasets as multiple tasks and show how instruction-based prompting in a multitask setup based on the T5 model improves the results against approaches built for a specific dataset such as T5, BERT or GPT-3. We show the ability of this multitask prompting approach to recognize 28 unique fallacies across domains and genres and study the effect of model size and prompt choice by analyzing the per-class (i.e., fallacy type) results. 
Finally, we analyze the effect of annotation quality on model performance, and the feasibility of complementing this approach with external knowledge.", + "author": "Tariq Alhindi; Tuhin Chakrabarty; Elena Musi; Smaranda Muresan", + "authorids": "/t/tariq-alhindi/; /t/tuhin-chakrabarty/; /e/elena-musi/; /s/smaranda-muresan/", + "bibtex": "@inproceedings{alhindi-etal-2022-multitask,\n title = \"Multitask Instruction-based Prompting for Fallacy Recognition\",\n author = \"Alhindi, Tariq and\n Chakrabarty, Tuhin and\n Musi, Elena and\n Muresan, Smaranda\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.560/\",\n doi = \"10.18653/v1/2022.emnlp-main.560\",\n pages = \"8172--8187\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.560.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.560/", + "pdf_size": 447088, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14370290856095967196&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, Columbia University + Data Science Institute, Columbia University; Department of Computer Science, Columbia University + Data Science Institute, Columbia University; Department of Communication and Media, University of Liverpool; Department of Computer Science, Columbia University + Data Science Institute, Columbia University", + "aff_domain": "cs.columbia.edu;cs.columbia.edu;liverpool.ac.uk;cs.columbia.edu", + "email": "cs.columbia.edu;cs.columbia.edu;liverpool.ac.uk;cs.columbia.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1;0+0", + "aff_unique_norm": "Columbia University;University 
of Liverpool", + "aff_unique_dep": "Department of Computer Science;Department of Communication and Media", + "aff_unique_url": "https://www.columbia.edu;https://www.liverpool.ac.uk", + "aff_unique_abbr": "Columbia;Liv Uni", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;1;0+0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "2022.emnlp-main.808", + "title": "Mutual Exclusivity Training and Primitive Augmentation to Induce Compositionality", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent datasets expose the lack of the systematic generalization ability in standard sequence-to-sequence models. In this work, we analyze this behavior of seq2seq models and identify two contributing factors: a lack of mutual exclusivity bias (one target sequence can only be mapped to one source sequence), and the tendency to memorize whole examples rather than separating structures from contents. We propose two techniques to address these two issues respectively: Mutual Exclusivity Training that prevents the model from producing seen generations when facing novel examples via an unlikelihood-based loss, and prim2primX data augmentation that automatically diversifies the arguments of every syntactic function to prevent memorizing and provide a compositional inductive bias without exposing test-set data. Combining these two techniques, we show substantial empirical improvements using standard sequence-to-sequence models (LSTMs and Transformers) on two widely-used compositionality datasets: SCAN and COGS. 
Finally, we provide analysis characterizing the improvements as well as the remaining challenges, and provide detailed ablations of our method.", + "author": "Yichen Jiang; Xiang Zhou; Mohit Bansal", + "authorids": "/y/yichen-jiang/; /x/xiang-zhou/; /m/mohit-bansal/", + "bibtex": "@inproceedings{jiang-etal-2022-mutual,\n title = \"Mutual Exclusivity Training and Primitive Augmentation to Induce Compositionality\",\n author = \"Jiang, Yichen and\n Zhou, Xiang and\n Bansal, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.808/\",\n doi = \"10.18653/v1/2022.emnlp-main.808\",\n pages = \"11778--11793\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.808.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.808/", + "pdf_size": 453166, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16405515041831876245&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "UNC Chapel Hill; UNC Chapel Hill; UNC Chapel Hill", + "aff_domain": "cs.unc.edu;cs.unc.edu;cs.unc.edu", + "email": "cs.unc.edu;cs.unc.edu;cs.unc.edu", + "github": "https://github.com/owenzx/met-primaug", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of North Carolina at Chapel Hill", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unc.edu", + "aff_unique_abbr": "UNC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chapel Hill", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.399", + "title": "Mutual Information Alleviates Hallucinations in Abstractive Summarization", + "track": "main", + 
"status": "Main", + "award": false, + "abstract": "Despite significant progress in the quality of language generated from abstractive summarization models, these models still exhibit the tendency to hallucinate, i.e., output content not supported by the source document. A number of works have tried to fix\u2014or at least uncover the source of\u2014the problem with limited success. In this paper, we identify a simple criterion under which models are significantly more likely to assign more probability to hallucinated content during generation: high model uncertainty. This finding offers a potential explanation for hallucinations: models default to favoring text with high marginal probability, i.e., high-frequency occurrences in the training set, when uncertain about a continuation. It also motivates possible routes for real-time intervention during decoding to prevent such hallucinations. We propose a decoding strategy that switches to optimizing for pointwise mutual information of the source and target token\u2014rather than purely the probability of the target token\u2014when the model exhibits uncertainty. 
Experiments on the dataset show that our method decreases the probability of hallucinated tokens while maintaining the Rouge and BERT-S scores of top-performing decoding strategies.", + "author": "Liam van der Poel; Ryan Cotterell; Clara Meister", + "authorids": "/l/liam-van-der-poel/; /r/ryan-cotterell/; /c/clara-meister/", + "bibtex": "@inproceedings{van-der-poel-etal-2022-mutual,\n title = \"Mutual Information Alleviates Hallucinations in Abstractive Summarization\",\n author = \"van der Poel, Liam and\n Cotterell, Ryan and\n Meister, Clara\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.399/\",\n doi = \"10.18653/v1/2022.emnlp-main.399\",\n pages = \"5956--5965\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.399.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.399/", + "pdf_size": 469055, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15224632582229290042&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "ETH Zurich; ETH Zurich; ETH Zurich", + "aff_domain": "ethz.ch;inf.ethz.ch;inf.ethz.ch", + "email": "ethz.ch;inf.ethz.ch;inf.ethz.ch", + "github": "https://github.com/VanderpoelLiam/CPMI", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "ETH Zurich", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ethz.ch", + "aff_unique_abbr": "ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.findings-emnlp.15", + "title": "NMTScore: A Multilingual Analysis of Translation-based Text Similarity Measures", + "track": 
"main", + "status": "finding", + "award": false, + "abstract": "Being able to rank the similarity of short text segments is an interesting bonus feature of neural machine translation. Translation-based similarity measures include direct and pivot translation probability, as well as translation cross-likelihood, which has not been studied so far. We analyze these measures in the common framework of multilingual NMT, releasing the NMTScore library. Compared to baselines such as sentence embeddings, translation-based measures prove competitive in paraphrase identification and are more robust against adversarial or multilingual input, especially if proper normalization is applied. When used for reference-based evaluation of data-to-text generation in 2 tasks and 17 languages, translation-based measures show a relatively high correlation to human judgments.", + "author": "Jannis Vamvas; Rico Sennrich", + "authorids": "/j/jannis-vamvas/; /r/rico-sennrich/", + "bibtex": "@inproceedings{vamvas-sennrich-2022-nmtscore,\n title = \"{NMTS}core: A Multilingual Analysis of Translation-based Text Similarity Measures\",\n author = \"Vamvas, Jannis and\n Sennrich, Rico\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.15/\",\n doi = \"10.18653/v1/2022.findings-emnlp.15\",\n pages = \"198--213\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.15.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.15/", + "pdf_size": 420811, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17663323626599125756&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computational Linguistics, University of Zurich 
+ School of Informatics, University of Edinburgh; Department of Computational Linguistics, University of Zurich + School of Informatics, University of Edinburgh", + "aff_domain": "cl.uzh.ch;cl.uzh.ch", + "email": "cl.uzh.ch;cl.uzh.ch", + "github": "https://github.com/ZurichNLP/nmtscore198", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "University of Zurich;University of Edinburgh", + "aff_unique_dep": "Department of Computational Linguistics;School of Informatics", + "aff_unique_url": "https://www.unizh.ch;https://www.ed.ac.uk", + "aff_unique_abbr": "UZH;Edinburgh", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Edinburgh", + "aff_country_unique_index": "0+1;0+1", + "aff_country_unique": "Switzerland;United Kingdom" + }, + { + "id": "2022.emnlp-industry.35", + "title": "Named Entity Recognition in Industrial Tables using Tabular Language Models", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Specialized transformer-based models for encoding tabular data have gained interest in academia. Although tabular data is omnipresent in industry, applications of table transformers are still missing. In this paper, we study how these models can be applied to an industrial Named Entity Recognition (NER) problem where the entities are mentioned in tabular-structured spreadsheets. The highly technical nature of spreadsheets as well as the lack of labeled data present major challenges for fine-tuning transformer-based models. Therefore, we develop a dedicated table data augmentation strategy based on available domain-specific knowledge graphs. We show that this boosts performance in our low-resource scenario considerably. Further, we investigate the benefits of tabular structure as inductive bias compared to tables as linearized sequences. 
Our experiments confirm that a table transformer outperforms other baselines and that its tabular inductive bias is vital for convergence of transformer-based models.", + "author": "Aneta Koleva; Martin Ringsquandl; Mark Buckley; Rakeb Hasan; Volker Tresp", + "authorids": "/a/aneta-koleva/; /m/martin-ringsquandl/; /m/mark-buckley/; /r/rakeb-hasan/; /v/volker-tresp/", + "bibtex": "@inproceedings{koleva-etal-2022-named,\n title = \"Named Entity Recognition in Industrial Tables using Tabular Language Models\",\n author = \"Koleva, Aneta and\n Ringsquandl, Martin and\n Buckley, Mark and\n Hasan, Rakeb and\n Tresp, Volker\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.35/\",\n doi = \"10.18653/v1/2022.emnlp-industry.35\",\n pages = \"348--356\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.35.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.35/", + "pdf_size": 738309, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16141822043127596973&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Siemens AG + Ludwig-Maximilians University; Siemens AG + Ludwig-Maximilians University; Siemens AG; Siemens AG; Siemens AG + Ludwig-Maximilians University", + "aff_domain": "siemens.com;siemens.com;siemens.com;siemens.com;siemens.com", + "email": "siemens.com;siemens.com;siemens.com;siemens.com;siemens.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0;0;0+1", + "aff_unique_norm": "Siemens AG;Ludwig-Maximilians-Universit\u00e4t M\u00fcnchen", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.siemens.com;https://www.lmu.de", + "aff_unique_abbr": 
"Siemens;LMU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.437", + "title": "Named Entity and Relation Extraction with Multi-Modal Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multi-modal named entity recognition (NER) and relation extraction (RE) aim to leverage relevant image information to improve the performance of NER and RE. Most existing efforts largely focused on directly extracting potentially useful information from images (such as pixel-level features, identified objects, and associated captions).However, such extraction processes may not be knowledge aware, resulting in information that may not be highly relevant.In this paper, we propose a novel Multi-modal Retrieval based framework (MoRe).MoRe contains a text retrieval module and an image-based retrieval module, which retrieve related knowledge of the input text and image in the knowledge corpus respectively.Next, the retrieval results are sent to the textual and visual models respectively for predictions.Finally, a Mixture of Experts (MoE) module combines the predictions from the two models to make the final decision.Our experiments show that both our textual model and visual model can achieve state-of-the-art performance on four multi-modal NER datasets and one multi-modal RE dataset.With MoE, the model performance can be further improved and our analysis demonstrates the benefits of integrating both textual and visual cues for such tasks.", + "author": "Xinyu Wang; Jiong Cai; Yong Jiang; Pengjun Xie; Kewei Tu; Wei Lu", + "authorids": "/x/xinyu-wang/; /j/jiong-cai/; /y/yong-jiang/; /p/pengjun-xie/; /k/kewei-tu/; /w/wei-lu/", + "bibtex": "@inproceedings{wang-etal-2022-named,\n title = \"Named Entity and Relation Extraction with Multi-Modal Retrieval\",\n author = \"Wang, Xinyu and\n Cai, Jiong and\n Jiang, Yong and\n Xie, 
Pengjun and\n Tu, Kewei and\n Lu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.437/\",\n doi = \"10.18653/v1/2022.findings-emnlp.437\",\n pages = \"5925--5936\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.437.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.437/", + "pdf_size": 1253203, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11838520667963131115&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging + Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging + Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging + Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; ; School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging + Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; StatNLP Research Group, Singapore University of Technology and 
Design", + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn;gmail.com;gmail.com;shanghaitech.edu.cn;sutd.edu.sg", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn;gmail.com;gmail.com;shanghaitech.edu.cn;sutd.edu.sg", + "github": "http://github.com/modelscope/adaseq/examples/MoRe", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2+3;0+1+2+3;0+1+2+3;0+1+2+3;4", + "aff_unique_norm": "ShanghaiTech University;Shanghai Engineering Research Center of Intelligent Vision and Imaging;Shanghai Institute of Microsystem and Information Technology;University of Chinese Academy of Sciences;Singapore University of Technology and Design", + "aff_unique_dep": "School of Information Science and Technology;;;;StatNLP Research Group", + "aff_unique_url": "https://www.shanghaitech.edu.cn;;http://www.sim.cas.cn;http://www.ucas.ac.cn;https://www.sutd.edu.sg", + "aff_unique_abbr": "ShanghaiTech;;SIM;UCAS;SUTD", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0+0+0;0+0+0+0;0+0+0+0;0+0+0+0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.findings-emnlp.14", + "title": "NarraSum: A Large-Scale Dataset for Abstractive Narrative Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Narrative summarization aims to produce a distilled version of a narrative to describe its most salient events and characters. Writing a summary for a narrative is challenging as it requires an understanding of event causality and character behaviors. To encourage research in this direction, we propose NarraSum, a large-scale narrative summarization dataset. It contains 122K narratives, which are collected from the synopses of movies and TV episodes with diverse genres, and their corresponding abstractive summaries. Experiments show that there is a large performance gap between humans and the state-of-the-art summarization models on NarraSum. 
We hope that this dataset will promote future research in summarization, as well as broader studies of natural language understanding and generation. The dataset is available at https://github.com/zhaochaocs/narrasum.", + "author": "Chao Zhao; Faeze Brahman; Kaiqiang Song; Wenlin Yao; Dian Yu; Snigdha Chaturvedi", + "authorids": "/c/chao-zhao/; /f/faeze-brahman/; /k/kaiqiang-song/; /w/wenlin-yao/; /d/dian-yu/; /s/snigdha-chaturvedi/", + "bibtex": "@inproceedings{zhao-etal-2022-narrasum,\n title = \"{N}arra{S}um: A Large-Scale Dataset for Abstractive Narrative Summarization\",\n author = \"Zhao, Chao and\n Brahman, Faeze and\n Song, Kaiqiang and\n Yao, Wenlin and\n Yu, Dian and\n Chaturvedi, Snigdha\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.14/\",\n doi = \"10.18653/v1/2022.findings-emnlp.14\",\n pages = \"182--197\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.14.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.14/", + "pdf_size": 720870, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6661097886049327064&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "UNC Chapel Hill; Allen Institute for AI+University of Washington; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; UNC Chapel Hill", + "aff_domain": "cs.unc.edu;allenai.org;global.tencent.com;global.tencent.com;global.tencent.com;cs.unc.edu", + "email": "cs.unc.edu;allenai.org;global.tencent.com;global.tencent.com;global.tencent.com;cs.unc.edu", + "github": "https://github.com/zhaochaocs/narrasum", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+2;3;3;3;0", + "aff_unique_norm": "University of North 
Carolina at Chapel Hill;Allen Institute for AI;University of Washington;Tencent", + "aff_unique_dep": ";;;Tencent AI Lab", + "aff_unique_url": "https://www.unc.edu;https://allenai.org;https://www.washington.edu;https://ai.tencent.com", + "aff_unique_abbr": "UNC;AI2;UW;Tencent AI Lab", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Chapel Hill;", + "aff_country_unique_index": "0;0+0;1;1;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.261", + "title": "Narrate Dialogues for Better Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Dialogue summarization models aim to generate a concise and accurate summary for multi-party dialogue. The complexity of dialogue, including coreference, dialogue acts, and inter-speaker interactions bring unique challenges to dialogue summarization. Most recent neural models achieve state-of-art performance following the pretrain-then-finetune recipe, where the large-scale language model (LLM) is pretrained on large-scale single-speaker written text, but later finetuned on multi-speaker dialogue text. To mitigate the gap between pretraining and finetuning, we propose several approaches to convert the dialogue into a third-person narrative style and show that the narration serves as a valuable annotation for LLMs. 
Empirical results on three benchmark datasets show our simple approach achieves higher scores on the ROUGE and a factual correctness metric.", + "author": "Ruochen Xu; Chenguang Zhu; Michael Zeng", + "authorids": "/r/ruochen-xu/; /c/chenguang-zhu/; /m/michael-zeng/", + "bibtex": "@inproceedings{xu-etal-2022-narrate,\n title = \"Narrate Dialogues for Better Summarization\",\n author = \"Xu, Ruochen and\n Zhu, Chenguang and\n Zeng, Michael\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.261/\",\n doi = \"10.18653/v1/2022.findings-emnlp.261\",\n pages = \"3565--3575\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.261.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.261/", + "pdf_size": 390059, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13693656004566266847&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Azure Cognitive Services Research, Microsoft; Azure Cognitive Services Research, Microsoft; Azure Cognitive Services Research, Microsoft", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Microsoft", + "aff_unique_dep": "Azure Cognitive Services Research", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.358", + "title": "Natural Language Deduction through Search over Statement Compositions", + 
"track": "main", + "status": "finding", + "award": false, + "abstract": "In settings from fact-checking to question answering, we frequently want to know whether a collection of evidence (premises) entails a hypothesis. Existing methods primarily focus on the end-to-end discriminative version of this task, but less work has treated the generative version in which a model searches over the space of statements entailed by the premises to constructively derive the hypothesis. We propose a system for doing this kind of deductive reasoning in natural language by decomposing the task into separate steps coordinated by a search procedure, producing a tree of intermediate conclusions that faithfully reflects the system\u2019s reasoning process. Our experiments on the EntailmentBank dataset (Dalvi et al., 2021) demonstrate that the proposed system can successfully prove true statements while rejecting false ones. Moreover, it produces natural language explanations with a 17% absolute higher step validity than those produced by an end-to-end T5 model.", + "author": "Kaj Bostrom; Zayne Sprague; Swarat Chaudhuri; Greg Durrett", + "authorids": "/k/kaj-bostrom/; /z/zayne-sprague/; /s/swarat-chaudhuri/; /g/greg-durrett/", + "bibtex": "@inproceedings{bostrom-etal-2022-natural,\n title = \"Natural Language Deduction through Search over Statement Compositions\",\n author = \"Bostrom, Kaj and\n Sprague, Zayne and\n Chaudhuri, Swarat and\n Durrett, Greg\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.358/\",\n doi = \"10.18653/v1/2022.findings-emnlp.358\",\n pages = \"4871--4883\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.358.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.358/", + "pdf_size": 984452, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13611003391927722629&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin", + "aff_domain": "cs.utexas.edu;utexas.edu;cs.utexas.edu;cs.utexas.edu", + "email": "cs.utexas.edu;utexas.edu;cs.utexas.edu;cs.utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.564", + "title": "Natural Language Deduction with Incomplete Information", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A growing body of work studies how to answer a question or verify a claim by generating a natural language \u201cproof:\u201d a chain of deductive inferences yielding the answer based on a set of premises. However, these methods can only make sound deductions when they follow from evidence that is given. We propose a new system that can handle the underspecified setting where not all premises are stated at the outset; that is, additional assumptions need to be materialized to prove a claim. By using a natural language generation model to abductively infer a premise given another premise and a conclusion, we can impute missing pieces of evidence needed for the conclusion to be true. 
Our system searches over two fringes in a bidirectional fashion, interleaving deductive (forward-chaining) and abductive (backward-chaining) generation steps. We sample multiple possible outputs for each step to achieve coverage of the search space, at the same time ensuring correctness by filtering low-quality generations with a round-trip validation procedure. Results on a modified version of the EntailmentBank dataset and a new dataset called Everyday Norms: Why Not? Show that abductive generation with validation can recover premises across in- and out-of-domain settings.", + "author": "Zayne Sprague; Kaj Bostrom; Swarat Chaudhuri; Greg Durrett", + "authorids": "/z/zayne-sprague/; /k/kaj-bostrom/; /s/swarat-chaudhuri/; /g/greg-durrett/", + "bibtex": "@inproceedings{sprague-etal-2022-natural,\n title = \"Natural Language Deduction with Incomplete Information\",\n author = \"Sprague, Zayne and\n Bostrom, Kaj and\n Chaudhuri, Swarat and\n Durrett, Greg\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.564/\",\n doi = \"10.18653/v1/2022.emnlp-main.564\",\n pages = \"8230--8258\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.564.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.564/", + "pdf_size": 5653014, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6437130290641659601&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas 
at Austin", + "aff_domain": "utexas.edu;cs.utexas.edu;cs.utexas.edu;cs.utexas.edu", + "email": "utexas.edu;cs.utexas.edu;cs.utexas.edu;cs.utexas.edu", + "github": "https://github.com/Zayne-sprague/Natural_Language_Deduction_with_Incomplete_Information.git", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.231", + "title": "Natural Language to Code Translation with Execution", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Generative models of code, pretrained on large corpora of programs, have shown great success in translating natural language to code (Chen et al., 2021; Austin et al., 2021; Li et al., 2022, inter alia). While these models do not explicitly incorporate program semantics (i.e., execution results) during training, they are able to generate correct solutions for many problems. However, choosing a single correct program from a generated set for each problem remains challenging. In this work, we introduce execution result\u2013based minimum Bayes risk decoding (MBR-EXEC) for program selection and show that it improves the few-shot performance of pretrained code models on natural-language-to-code tasks. We select output programs from a generated candidate set by marginalizing over program implementations that share the same semantics. Because exact equivalence is intractable, we execute each program on a small number of test inputs to approximate semantic equivalence. Across datasets, execution or simulated execution significantly outperforms the methods that do not involve program semantics. 
We find that MBR-EXEC consistently improves over all execution-unaware selection methods, suggesting it as an effective approach for natural language to code translation.", + "author": "Freda Shi; Daniel Fried; Marjan Ghazvininejad; Luke Zettlemoyer; Sida I. Wang", + "authorids": "/f/freda-shi/; /d/daniel-fried/; /m/marjan-ghazvininejad/; /l/luke-zettlemoyer/; /s/sida-i-wang/", + "bibtex": "@inproceedings{shi-etal-2022-natural,\n title = \"Natural Language to Code Translation with Execution\",\n author = \"Shi, Freda and\n Fried, Daniel and\n Ghazvininejad, Marjan and\n Zettlemoyer, Luke and\n Wang, Sida I.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.231/\",\n doi = \"10.18653/v1/2022.emnlp-main.231\",\n pages = \"3533--3546\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.231.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.231/", + "pdf_size": 610159, + "gs_citation": 112, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1281958609978677627&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Meta AI+Toyota Technological Institute at Chicago; Meta AI+Carnegie Mellon University; Meta AI; Meta AI+University of Washington; Meta AI", + "aff_domain": "ttic.edu;cs.cmu.edu;fb.com;fb.com;fb.com", + "email": "ttic.edu;cs.cmu.edu;fb.com;fb.com;fb.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+2;0;0+3;0", + "aff_unique_norm": "Meta Platforms, Inc.;Toyota Technological Institute at Chicago;Carnegie Mellon University;University of Washington", + "aff_unique_dep": "Meta AI;;;", + "aff_unique_url": 
"https://meta.com;https://www.tti-chicago.org;https://www.cmu.edu;https://www.washington.edu", + "aff_unique_abbr": "Meta;TTI Chicago;CMU;UW", + "aff_campus_unique_index": "1;;", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0+0;0+0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.411", + "title": "Natural Logic-guided Autoregressive Multi-hop Document Retrieval for Fact Verification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A key component of fact verification is the evidence retrieval, often from multiple documents. Recent approaches use dense representations and condition the retrieval of each document on the previously retrieved ones. The latter step is performed over all the documents in the collection, requiring storing their dense representations in an index, thus incurring a high memory footprint. An alternative paradigm is retrieve-and-rerank, where documents are retrieved using methods such as BM25, their sentences are reranked, and further documents are retrieved conditioned on these sentences, reducing the memory requirements. However, such approaches can be brittle as they rely on heuristics and assume hyperlinks between documents.We propose a novel retrieve-and-rerank method for multi-hop retrieval, that consists of a retriever that jointly scores documents in the knowledge source and sentences from previously retrieved documents using an autoregressive formulation and is guided by a proof system based on natural logic that dynamically terminates the retrieval process if the evidence is deemed sufficient.This method exceeds or is on par with the current state-of-the-art on FEVER, HoVer and FEVEROUS-S, while using 5 to 10 times less memory than competing systems. Evaluation on an adversarial dataset indicates improved stability of our approach compared to commonly deployed threshold-based methods. 
Finally, the proof system helps humans predict model decisions correctly more often than using the evidence alone.", + "author": "Rami Aly; Andreas Vlachos", + "authorids": "/r/rami-aly/; /a/andreas-vlachos/", + "bibtex": "@inproceedings{aly-vlachos-2022-natural,\n title = \"Natural Logic-guided Autoregressive Multi-hop Document Retrieval for Fact Verification\",\n author = \"Aly, Rami and\n Vlachos, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.411/\",\n doi = \"10.18653/v1/2022.emnlp-main.411\",\n pages = \"6123--6135\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.411.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.411/", + "pdf_size": 352731, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1885721268169822097&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Cambridge Department of Computer Science and Technology; University of Cambridge Department of Computer Science and Technology", + "aff_domain": "cl.cam.ac.uk;cl.cam.ac.uk", + "email": "cl.cam.ac.uk;cl.cam.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Department of Computer Science and Technology", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.413", + "title": "NaturalAdversaries: Can Naturalistic Adversaries Be as Effective as Artificial Adversaries?", + 
"track": "main", + "status": "finding", + "award": false, + "abstract": "While a substantial body of prior work has explored adversarial example generation for natural language understanding tasks, these examples are often unrealistic and diverge from the real-world data distributions. In this work, we introduce a two-stage adversarial example generation framework (NaturalAdversaries), for designing adversaries that are effective at fooling a given classifier and demonstrate natural-looking failure cases that could plausibly occur during in-the-wild deployment of the models. At the first stage a token attribution method is used to summarize a given classifier\u2019s behavior as a function of the key tokens in the input. In the second stage a generative model is conditioned on the key tokens from the first stage. NaturalAdversaries is adaptable to both black-box and white-box adversarial attacks based on the level of access to the model parameters. Our results indicate these adversaries generalize across domains, and offer insights for future research on improving robustness of neural text classification models.", + "author": "Saadia Gabriel; Hamid Palangi; Yejin Choi", + "authorids": "/s/saadia-gabriel/; /h/hamid-palangi/; /y/yejin-choi/", + "bibtex": "@inproceedings{gabriel-etal-2022-naturaladversaries,\n title = \"{N}atural{A}dversaries: Can Naturalistic Adversaries Be as Effective as Artificial Adversaries?\",\n author = \"Gabriel, Saadia and\n Palangi, Hamid and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.413/\",\n doi = \"10.18653/v1/2022.findings-emnlp.413\",\n pages = \"5635--5645\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.413.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.413/", + "pdf_size": 312481, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5419203067359618464&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington; Microsoft Research; Allen Institute for Artificial Intelligence", + "aff_domain": "cs.washington.edu;microsoft.com;cs.washington.edu", + "email": "cs.washington.edu;microsoft.com;cs.washington.edu", + "github": "https://github.com/skgabriel/NaturalAdversaries", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Washington;Microsoft Corporation;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;Microsoft Research;", + "aff_unique_url": "https://www.washington.edu;https://www.microsoft.com/en-us/research;https://allenai.org", + "aff_unique_abbr": "UW;MSR;AI2", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.160", + "title": "Navigating Connected Memories with a Task-oriented Dialog System", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent years have seen an increasing trend in the volume of personal media captured by users, thanks to the advent of smartphones and smart glasses, resulting in large media collections. Despite conversation being an intuitive human-computer interface, current efforts focus mostly on single-shot natural language based media retrieval to aid users query their media and re-live their memories. 
This severely limits the search functionality as users can neither ask follow-up queries nor obtain information without first formulating a single-turn query.In this work, we propose dialogs for connected memories as a powerful tool to empower users to search their media collection through a multi-turn, interactive conversation. Towards this, we collect a new task-oriented dialog dataset COMET, which contains 11.5k user\u2194assistant dialogs (totalling 103k utterances), grounded in simulated personal memory graphs. We employ a resource-efficient, two-phase data collection pipeline that uses: (1) a novel multimodal dialog simulator that generates synthetic dialog flows grounded in memory graphs, and, (2) manual paraphrasing to obtain natural language utterances. We analyze COMET, formulate four main tasks to benchmark meaningful progress, and adopt state-of-the-art language models as strong baselines, in order to highlight the multimodal challenges captured by our dataset.", + "author": "Satwik Kottur; Seungwhan Moon; Alborz Geramifard; Babak Damavandi", + "authorids": "/s/satwik-kottur/; /s/seungwhan-moon/; /a/alborz-geramifard/; /b/babak-damavandi/", + "bibtex": "@inproceedings{kottur-etal-2022-navigating,\n title = \"Navigating Connected Memories with a Task-oriented Dialog System\",\n author = \"Kottur, Satwik and\n Moon, Seungwhan and\n Geramifard, Alborz and\n Damavandi, Babak\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.160/\",\n doi = \"10.18653/v1/2022.emnlp-main.160\",\n pages = \"2495--2507\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.160.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.160/", + "pdf_size": 
6689144, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11579439234089522753&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "Meta Reality Labs & Meta AI; Meta Reality Labs & Meta AI; Meta Reality Labs & Meta AI; Meta Reality Labs & Meta AI", + "aff_domain": "fb.com;fb.com;fb.com;fb.com", + "email": "fb.com;fb.com;fb.com;fb.com", + "github": "github.com/facebookresearch/comet_memory_dialog", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Meta", + "aff_unique_dep": "Reality Labs & AI", + "aff_unique_url": "https://www.meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.135", + "title": "Near-Negative Distinction: Giving a Second Life to Human Evaluation Datasets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Precisely assessing the progress in natural language generation (NLG) tasks is challenging, and human evaluation to establish a preference in a model\u2019s output over another is often necessary.However, human evaluation is usually costly, difficult to reproduce, and non-reusable.In this paper, we propose a new and simple automatic evaluation method for NLG called Near-Negative Distinction (NND) that repurposes prior human annotations into NND tests.In an NND test, an NLG model must place a higher likelihood on a high-quality output candidate than on a near-negative candidate with a known error.Model performance is established by the number of NND tests a model passes, as well as the distribution over task-specific errors the model fails on.Through experiments on three NLG tasks (question generation, question answering, and summarization), we show that NND achieves a higher correlation with human judgments than standard NLG evaluation metrics. 
We then illustrate NND evaluation in four practical scenarios, for example performing fine-grain model analysis, or studying model training dynamics. Our findings suggest that NND can give a second life to human annotations and provide low-cost NLG evaluation.", + "author": "Philippe Laban; Chien-Sheng Wu; Wenhao Liu; Caiming Xiong", + "authorids": "/p/philippe-laban/; /c/chien-sheng-wu/; /w/wenhao-liu/; /c/caiming-xiong/", + "bibtex": "@inproceedings{laban-etal-2022-near,\n title = \"Near-Negative Distinction: Giving a Second Life to Human Evaluation Datasets\",\n author = \"Laban, Philippe and\n Wu, Chien-Sheng and\n Liu, Wenhao and\n Xiong, Caiming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.135/\",\n doi = \"10.18653/v1/2022.emnlp-main.135\",\n pages = \"2094--2108\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.135.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.135/", + "pdf_size": 670769, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3954821545493498496&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Salesforce AI Research; Salesforce AI Research; Salesforce AI Research; Salesforce AI Research", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Salesforce", + "aff_unique_dep": "Salesforce AI Research", + "aff_unique_url": "https://www.salesforce.com", + "aff_unique_abbr": "Salesforce AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.214", + "title": "Nearest Neighbor Zero-Shot Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Retrieval-augmented language models (LMs) use non-parametric memory to substantially outperform their non-retrieval counterparts on perplexity-based evaluations, but it is an open question whether they achieve similar gains in few- and zero-shot end-task accuracy. We extensively study one such model, the k-nearest neighbor LM (kNN-LM), showing that the gains marginally transfer. The main challenge is to achieve coverage of the verbalizer tokens that define the different end-task class labels. To address this challenge, we also introduce kNN-Prompt, a simple and effective kNN-LM with automatically expanded fuzzy verbalizers (e.g. to expand \u201cterrible\u201d to also include \u201csilly\u201d and other task-specific synonyms for sentiment classification). Across nine diverse end-tasks, using kNN-Prompt with GPT-2 large yields significant performance boosts over strong zeroshot baselines (13.4% absolute improvement over the base LM on average). 
We also show that other advantages of non-parametric augmentation hold for end tasks; kNN-Prompt is effective for domain adaptation with no further training, and gains increase with the size of the retrieval model.", + "author": "Weijia Shi; Julian Michael; Suchin Gururangan; Luke Zettlemoyer", + "authorids": "/w/weijia-shi/; /j/julian-michael/; /s/suchin-gururangan/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{shi-etal-2022-nearest,\n title = \"Nearest Neighbor Zero-Shot Inference\",\n author = \"Shi, Weijia and\n Michael, Julian and\n Gururangan, Suchin and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.214/\",\n doi = \"10.18653/v1/2022.emnlp-main.214\",\n pages = \"3254--3265\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.214.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.214/", + "pdf_size": 832323, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14811691053298481481&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.802", + "title": "Neighborhood Contrastive Learning for Scientific Document Representations with Citation Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Learning scientific document representations can be substantially improved through contrastive learning objectives, where the challenge lies in creating positive and negative training samples that encode the desired similarity semantics. 
Prior work relies on discrete citation relations to generate contrast samples. However, discrete citations enforce a hard cut-off to similarity. This is counter-intuitive to similarity-based learning and ignores that scientific papers can be very similar despite lacking a direct citation - a core problem of finding related research. Instead, we use controlled nearest neighbor sampling over citation graph embeddings for contrastive learning. This control allows us to learn continuous similarity, to sample hard-to-learn negatives and positives, and also to avoid collisions between negative and positive samples by controlling the sampling margin between them. The resulting method SciNCL outperforms the state-of-the-art on the SciDocs benchmark. Furthermore, we demonstrate that it can train (or tune) language models sample-efficiently and that it can be combined with recent training-efficient methods. Perhaps surprisingly, even training a general-domain language model this way outperforms baselines pretrained in-domain.", + "author": "Malte Ostendorff; Nils Rethmeier; Isabelle Augenstein; Bela Gipp; Georg Rehm", + "authorids": "/m/malte-ostendorff/; /n/nils-rethmeier/; /i/isabelle-augenstein/; /b/bela-gipp/; /g/georg-rehm/", + "bibtex": "@inproceedings{ostendorff-etal-2022-neighborhood,\n title = \"Neighborhood Contrastive Learning for Scientific Document Representations with Citation Embeddings\",\n author = \"Ostendorff, Malte and\n Rethmeier, Nils and\n Augenstein, Isabelle and\n Gipp, Bela and\n Rehm, Georg\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.802/\",\n doi = \"10.18653/v1/2022.emnlp-main.802\",\n pages = \"11670--11688\"\n}", + 
"pdf": "https://aclanthology.org/2022.emnlp-main.802.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.802/", + "pdf_size": 544236, + "gs_citation": 89, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18424617678496400674&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "DFKI GmbH + University of G\u00f6ttingen; DFKI GmbH + University of Copenhagen; University of Copenhagen; University of G\u00f6ttingen; DFKI GmbH", + "aff_domain": "dfki.de;dfki.de;di.ku.dk;uni-goettingen.de;dfki.de", + "email": "dfki.de;dfki.de;di.ku.dk;uni-goettingen.de;dfki.de", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+2;2;1;0", + "aff_unique_norm": "Deutsches Forschungszentrum f\u00fcr K\u00fcnstliche Intelligenz GmbH;University of G\u00f6ttingen;University of Copenhagen", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.dfki.de;https://www.uni-goettingen.de;https://www.ku.dk", + "aff_unique_abbr": "DFKI;Georg-August-Universit\u00e4t G\u00f6ttingen;UCPH", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+1;1;0;0", + "aff_country_unique": "Germany;Denmark" + }, + { + "id": "2022.emnlp-main.235", + "title": "Neural Machine Translation with Contrastive Translation Memories", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Retrieval-augmented Neural Machine Translation models have been successful in many translation scenarios. Different from previous works that make use of mutually similar but redundant translation memories (TMs), we propose a new retrieval-augmented NMT to model contrastively retrieved translation memories that are holistically similar to the source sentence while individually contrastive to each other providing maximal information gain in three phases. First, in TM retrieval phase, we adopt contrastive retrieval algorithm to avoid redundancy and uninformativeness of similar translation pieces. 
Second, in memory encoding stage, given a set of TMs we propose a novel Hierarchical Group Attention module to gather both local context of each TM and global context of the whole TM set. Finally, in training phase, a Multi-TM contrastive learning objective is introduced to learn salient feature of each TM with respect to target sentence. Experimental results show that our framework obtains substantial improvements over strong baselines in the benchmark dataset.", + "author": "Xin Cheng; Shen Gao; Lemao Liu; Dongyan Zhao; Rui Yan", + "authorids": "/x/xin-cheng/; /s/shen-gao/; /l/lemao-liu/; /d/dongyan-zhao/; /r/rui-yan/", + "bibtex": "@inproceedings{cheng-etal-2022-neural,\n title = \"Neural Machine Translation with Contrastive Translation Memories\",\n author = \"Cheng, Xin and\n Gao, Shen and\n Liu, Lemao and\n Zhao, Dongyan and\n Yan, Rui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.235/\",\n doi = \"10.18653/v1/2022.emnlp-main.235\",\n pages = \"3591--3601\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.235.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.235/", + "pdf_size": 820961, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2329519993251742647&as_sdt=5,36&sciodt=0,36&hl=en", + "gs_version_total": 3, + "aff": "Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, Peking University; School of Computer Science and Technology, Shandong University; Tencent AI Lab; Wangxuan Institute of Computer Technology, Peking University+State Key Laboratory of Media Convergence Production Technology and Systems+Beijing Institute of General Artificial 
Intelligence (BIGAI); Gaoling School of Artificial Intelligence, Renmin University of China", + "aff_domain": "stu.pku.edu.cn;sdu.edu.cn;tencent.com;pku.edu.cn;ruc.edu.cn", + "email": "stu.pku.edu.cn;sdu.edu.cn;tencent.com;pku.edu.cn;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;1;2;0+3+4;5", + "aff_unique_norm": "Peking University;Shandong University;Tencent;State Key Laboratory of Media Convergence Production Technology and Systems;Beijing Institute of General Artificial Intelligence;Renmin University of China", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;School of Computer Science and Technology;Tencent AI Lab;;;Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;http://www.sdu.edu.cn;https://ai.tencent.com;;http://www.bigmodel.cn/;http://www.ruc.edu.cn", + "aff_unique_abbr": "PKU;;Tencent AI Lab;;BIGAI;RUC", + "aff_campus_unique_index": "1;;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.248", + "title": "Neural Theory-of-Mind? On the Limits of Social Intelligence in Large LMs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Social intelligence and Theory of Mind (TOM), i.e., the ability to reason about the different mental states, intents, and reactions of all people involved, allows humans to effectively navigate and understand everyday social interactions. As NLP systems are used in increasingly complex social situations, their ability to grasp social dynamics becomes crucial.In this work, we examine the open question of social intelligence and Theory of Mind in modern NLP systems from an empirical and theorybased perspective. 
We show that one of today\u2019s largest language models (GPT-3; Brown et al., 2020) lacks this kind of social intelligence out-of-the box, using two tasks: SocialIQa (Sap et al., 2019), which measure models\u2019 ability to understand intents and reactions of participants of social interactions, and ToMi (Le, Boureau, and Nickel, 2019), which measures whether models can infer mental states and realities of participants of situations.Our results show that models struggle substantially at these Theory of Mind tasks, with well-below-human accuracies of 55% and 60% on SocialIQa and ToMi, respectively. To conclude, we draw on theories from pragmatics to contextualize this shortcoming of large language models, by examining the limitations stemming from their data, neural architecture, and training paradigms. Challenging the prevalent narrative that only scale is needed, we posit that person-centric NLP approaches might be more effective towards neural Theory of Mind.", + "author": "Maarten Sap; Ronan Le Bras; Daniel Fried; Yejin Choi", + "authorids": "/m/maarten-sap/; /r/ronan-le-bras/; /d/daniel-fried/; /y/yejin-choi/", + "bibtex": "@inproceedings{sap-etal-2022-neural,\n title = \"Neural Theory-of-Mind? 
On the Limits of Social Intelligence in Large {LM}s\",\n author = \"Sap, Maarten and\n Le Bras, Ronan and\n Fried, Daniel and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.248/\",\n doi = \"10.18653/v1/2022.emnlp-main.248\",\n pages = \"3762--3780\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.248.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.248/", + "pdf_size": 702919, + "gs_citation": 226, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=588004431319407027&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": "Allen Institute for AI, Seattle, WA, USA\u2660; Language Technologies Institute, Carnegie Mellon University, Pittsburgh, USA\u2666; Allen Institute for AI, Seattle, WA, USA\u2660; Paul G. Allen School of Computer Science, University of Washington, Seattle, WA, USA\u2665", + "aff_domain": "cmu.edu; ; ; ", + "email": "cmu.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Allen Institute for AI;Carnegie Mellon University;University of Washington", + "aff_unique_dep": ";Language Technologies Institute;Paul G. 
Allen School of Computer Science", + "aff_unique_url": "https://allenai.org;https://www.cmu.edu;https://www.washington.edu", + "aff_unique_abbr": "AI2;CMU;UW", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "Seattle;Pittsburgh", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.314", + "title": "Neural-Symbolic Inference for Robust Autoregressive Graph Parsing via Compositional Uncertainty Quantification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained seq2seq models excel at graph semantic parsing with rich annotated data, but generalize worse to out-of-distribution (OOD) and long-tail examples. In comparison, symbolic parsers under-perform on population-level metrics, but exhibit unique strength in OOD and tail generalization. In this work, we study compositionality-aware approach to neural-symbolic inference informed by model confidence, performing fine-grained neural-symbolic reasoning at subgraph level (i.e., nodes and edges) and precisely targeting subgraph components with high uncertainty in the neural parser. As a result, the method combines the distinct strength of the neural and symbolic approaches in capturing different aspects of the graph prediction, leading to well-rounded generalization performance both across domains and in the tail. We empirically investigate the approach in the English Resource Grammar (ERG) parsing problem on a diverse suite of standard in-domain and seven OOD corpora. 
Our approach leads to 35.26% and 35.60% error reduction in aggregated SMATCH score over neural and symbolic approaches respectively, and 14% absolute accuracy gain in key tail linguistic categories over the neural model, outperforming prior state-of-art methods that do not account for compositionality or uncertainty.", + "author": "Zi Lin; Jeremiah Liu; Jingbo Shang", + "authorids": "/z/zi-lin/; /j/jeremiah-liu/; /j/jingbo-shang/", + "bibtex": "@inproceedings{lin-etal-2022-neural,\n title = \"Neural-Symbolic Inference for Robust Autoregressive Graph Parsing via Compositional Uncertainty Quantification\",\n author = \"Lin, Zi and\n Liu, Jeremiah and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.314/\",\n doi = \"10.18653/v1/2022.emnlp-main.314\",\n pages = \"4759--4776\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.314.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.314/", + "pdf_size": 601134, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9863118750687859363&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "UC San Diego; Google Research + Harvard University; UC San Diego", + "aff_domain": "ucsd.edu;google.com;ucsd.edu", + "email": "ucsd.edu;google.com;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "University of California, San Diego;Google;Harvard University", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.ucsd.edu;https://research.google;https://www.harvard.edu", + "aff_unique_abbr": "UCSD;Google Research;Harvard", + "aff_campus_unique_index": "0;1;0", 
+ "aff_campus_unique": "San Diego;Mountain View;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.194", + "title": "Neural-based Mixture Probabilistic Query Embedding for Answering FOL queries on Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Query embedding (QE)\u2014which aims to embed entities and first-order logical (FOL) queries in a vector space, has shown great power in answering FOL queries on knowledge graphs (KGs). Existing QE methods divide a complex query into a sequence of mini-queries according to its computation graph and perform logical operations on the answer sets of mini-queries to get answers. However, most of them assume that answer sets satisfy an individual distribution (e.g., Uniform, Beta, or Gaussian), which is often violated in real applications and limit their performance. In this paper, we propose a Neural-based Mixture Probabilistic Query Embedding Model (NMP-QEM) that encodes the answer set of each mini-query as a mixed Gaussian distribution with multiple means and covariance parameters, which can approximate any random distribution arbitrarily well in real KGs. Additionally, to overcome the difficulty in defining the closed solution of negation operation, we introduce neural-based logical operators of projection, intersection and negation for a mixed Gaussian distribution to answer all the FOL queries. Extensive experiments demonstrate that NMP-QEM significantly outperforms existing state-of-the-art methods on benchmark datasets. 
In NELL995, NMP-QEM achieves a 31% relative improvement over the state-of-the-art.", + "author": "Xiao Long; Liansheng Zhuang; Li Aodi; Shafei Wang; Houqiang Li", + "authorids": "/x/xiao-long/; /l/liansheng-zhuang/; /l/li-aodi/; /s/shafei-wang/; /h/houqiang-li/", + "bibtex": "@inproceedings{long-etal-2022-neural,\n title = \"Neural-based Mixture Probabilistic Query Embedding for Answering {FOL} queries on Knowledge Graphs\",\n author = \"Long, Xiao and\n Zhuang, Liansheng and\n Aodi, Li and\n Wang, Shafei and\n Li, Houqiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.194/\",\n doi = \"10.18653/v1/2022.emnlp-main.194\",\n pages = \"3001--3013\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.194.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.194/", + "pdf_size": 1206495, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18117369946876043699&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "University of Science and Technology of China; University of Science and Technology of China; University of Science and Technology of China; Peng Cheng Laboratory; University of Science and Technology of China", + "aff_domain": "mail.ustc.edu.cn;ustc.edu.cn; ; ; ", + "email": "mail.ustc.edu.cn;ustc.edu.cn; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "University of Science and Technology of China;Peng Cheng Laboratory", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.ustc.edu.cn;http://www.pcl.ac.cn", + "aff_unique_abbr": "USTC;PCL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.371", + "title": "NeuroCounterfactuals: Beyond Minimal-Edit Counterfactuals for Richer Data Augmentation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While counterfactual data augmentation offers a promising step towards robust generalization in natural language processing, producing a set of counterfactuals that offer valuable inductive bias for models remains a challenge. Most existing approaches for producing counterfactuals, manual or automated, rely on small perturbations via minimal edits, resulting in simplistic changes. We introduce NeuroCounterfactuals, designed as loose counterfactuals, allowing for larger edits which result in naturalistic generations containing linguistic diversity, while still bearing similarity to the original document. Our novel generative approach bridges the benefits of constrained decoding, with those of language model adaptation for sentiment steering. Training data augmentation with our generations results in both in-domain and out-of-domain improvements for sentiment classification, outperforming even manually curated counterfactuals, under select settings. 
We further present detailed analyses to show the advantages of NeuroCounterfactuals over approaches involving simple, minimal edits.", + "author": "Phillip Howard; Gadi Singer; Vasudev Lal; Yejin Choi; Swabha Swayamdipta", + "authorids": "/p/phillip-howard/; /g/gadi-singer/; /v/vasudev-lal/; /y/yejin-choi/; /s/swabha-swayamdipta/", + "bibtex": "@inproceedings{howard-etal-2022-neurocounterfactuals,\n title = \"{N}euro{C}ounterfactuals: Beyond Minimal-Edit Counterfactuals for Richer Data Augmentation\",\n author = \"Howard, Phillip and\n Singer, Gadi and\n Lal, Vasudev and\n Choi, Yejin and\n Swayamdipta, Swabha\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.371/\",\n doi = \"10.18653/v1/2022.findings-emnlp.371\",\n pages = \"5056--5072\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.371.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.371/", + "pdf_size": 741829, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5566185119559851260&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Intel Labs; Intel Labs; Intel Labs; Allen Institute for AI + Paul G. Allen School of Computer Science & Engineering, University of Washington; Allen Institute for AI + University of Southern California", + "aff_domain": "intel.com; ; ; ; ", + "email": "intel.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1+2;1+3", + "aff_unique_norm": "Intel Corporation;Allen Institute for AI;University of Washington;University of Southern California", + "aff_unique_dep": "Intel Labs;;Paul G. 
Allen School of Computer Science & Engineering;", + "aff_unique_url": "https://www.intel.com;https://allenai.org;https://www.washington.edu;https://www.usc.edu", + "aff_unique_abbr": "Intel;AI2;UW;USC", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Seattle;Los Angeles", + "aff_country_unique_index": "0;0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.403", + "title": "NewsClaims: A New Benchmark for Claim Detection from News with Attribute Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Claim detection and verification are crucial for news understanding and have emerged as promising technologies for mitigating misinformation and disinformation in the news. However, most existing work has focused on claim sentence analysis while overlooking additional crucial attributes (e.g., the claimer and the main object associated with the claim).In this work, we present NewsClaims, a new benchmark for attribute-aware claim detection in the news domain. We extend the claim detection problem to include extraction of additional attributes related to each claim and release 889 claims annotated over 143 news articles. NewsClaims aims to benchmark claim detection systems in emerging scenarios, comprising unseen topics with little or no training data. 
To this end, we see that zero-shot and prompt-based baselines show promising performance on this benchmark, while still considerably behind human performance.", + "author": "Revanth Gangi Reddy; Sai Chetan Chinthakindi; Zhenhailong Wang; Yi Fung; Kathryn Conger; Ahmed ELsayed; Martha Palmer; Preslav Nakov; Eduard Hovy; Kevin Small; Heng Ji", + "authorids": "/r/revanth-gangi-reddy/; /s/sai-chetan-chinthakindi/; /z/zhenhailong-wang/; /y/yi-fung/; /k/kathryn-conger/; /a/ahmed-elsayed/; /m/martha-palmer/; /p/preslav-nakov/; /e/eduard-hovy/; /k/kevin-small/; /h/heng-ji/", + "bibtex": "@inproceedings{gangi-reddy-etal-2022-newsclaims,\n title = \"{N}ews{C}laims: A New Benchmark for Claim Detection from News with Attribute Knowledge\",\n author = \"Gangi Reddy, Revanth and\n Chinthakindi, Sai Chetan and\n Wang, Zhenhailong and\n Fung, Yi and\n Conger, Kathryn and\n ELsayed, Ahmed and\n Palmer, Martha and\n Nakov, Preslav and\n Hovy, Eduard and\n Small, Kevin and\n Ji, Heng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.403/\",\n doi = \"10.18653/v1/2022.emnlp-main.403\",\n pages = \"6002--6018\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.403.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.403/", + "pdf_size": 1830771, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1684351304044066589&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 7, + "aff": "UIUC+DARPA+CMU; UIUC+DARPA+CMU; UIUC+DARPA+CMU; UIUC+DARPA+CMU; CU Boulder; CU Boulder; CU Boulder; MBZUAI; DARPA+CMU; Amazon; UIUC+DARPA+CMU", + "aff_domain": "illinois.edu; ; ; ; ; ; ; ; ; ;illinois.edu", + "email": "illinois.edu; ; ; ; ; ; ; 
; ; ;illinois.edu", + "github": "https://github.com/blender-nlp/NewsClaims", + "project": "", + "author_num": 11, + "aff_unique_index": "0+1+2;0+1+2;0+1+2;0+1+2;3;3;3;4;1+2;5;0+1+2", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Defense Advanced Research Projects Agency;Carnegie Mellon University;University of Colorado Boulder;Mohamed Bin Zayed University of Artificial Intelligence;Amazon.com, Inc.", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.illinois.edu;https://www.darpa.mil;https://www.cmu.edu;https://www.colorado.edu;https://www.mbzuai.ac.ae;https://www.amazon.com", + "aff_unique_abbr": "UIUC;DARPA;CMU;CU Boulder;MBZUAI;Amazon", + "aff_campus_unique_index": "0;0;0;0;2;2;2;;0", + "aff_campus_unique": "Urbana-Champaign;;Boulder", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0;0;0;0;1;0+0;0;0+0+0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "2022.findings-emnlp.152", + "title": "No Word Embedding Model Is Perfect: Evaluating the Representation Accuracy for Social Bias in the Media", + "track": "main", + "status": "finding", + "award": false, + "abstract": "News articles both shape and reflect public opinion across the political spectrum. Analyzing them for social bias can thus provide valuable insights, such as prevailing stereotypes in society and the media, which are often adopted by NLP models trained on respective data. Recent work has relied on word embedding bias measures, such as WEAT. However, several representation issues of embeddings can harm the measures\u2019 accuracy, including low-resource settings and token frequency differences. In this work, we study what kind of embedding algorithm serves best to accurately measure types of social bias known to exist in US online news articles. To cover the whole spectrum of political bias in the US, we collect 500k articles and review psychology literature with respect to expected social bias. 
We then quantify social bias using WEAT along with embedding algorithms that account for the aforementioned issues. We compare how models trained with the algorithms on news articles represent the expected social bias. Our results suggest that the standard way to quantify bias does not align well with knowledge from psychology. While the proposed algorithms reduce the gap, they still do not fully match the literature.", + "author": "Maximilian Splieth\u00f6ver; Maximilian Keiff; Henning Wachsmuth", + "authorids": "/m/maximilian-spliethover/; /m/maximilian-keiff/; /h/henning-wachsmuth/", + "bibtex": "@inproceedings{spliethover-etal-2022-word,\n title = \"No Word Embedding Model Is Perfect: Evaluating the Representation Accuracy for Social Bias in the Media\",\n author = {Splieth{\\\"o}ver, Maximilian and\n Keiff, Maximilian and\n Wachsmuth, Henning},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.152/\",\n doi = \"10.18653/v1/2022.findings-emnlp.152\",\n pages = \"2081--2093\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.152.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.152/", + "pdf_size": 594846, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10304575174693349135&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Leibniz University Hannover, Institute of Artificial Intelligence; Universit\u00e4t Hamburg, Department of Informatics; Leibniz University Hannover, Institute of Artificial Intelligence", + "aff_domain": "ai.uni-hannover.de;studium.uni-hamburg.de;ai.uni-hannover.de", + "email": "ai.uni-hannover.de;studium.uni-hamburg.de;ai.uni-hannover.de", + "github": "", + 
"project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Leibniz University Hannover;Universit\u00e4t Hamburg", + "aff_unique_dep": "Institute of Artificial Intelligence;Department of Informatics", + "aff_unique_url": "https://www.uni-hannover.de;https://www.uni-hamburg.de", + "aff_unique_abbr": "LUH;UHH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.179", + "title": "Non-Autoregressive Neural Machine Translation: A Call for Clarity", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Non-autoregressive approaches aim to improve the inference speed of translation models by only requiring a single forward pass to generate the output sequence instead of iteratively producing each predicted token. Consequently, their translation quality still tends to be inferior to their autoregressive counterparts due to several issues involving output token interdependence. In this work, we take a step back and revisit several techniques that have been proposed for improving non-autoregressive translation models and compare their combined translation quality and speed implications under third-party testing environments. We provide novel insights for establishing strong baselines using length prediction or CTC-based architecture variants and contribute standardized BLEU, chrF++, and TER scores using sacreBLEU on four translation tasks, which crucially have been missing as inconsistencies in the use of tokenized BLEU lead to deviations of up to 1.7 BLEU points. 
Our open-sourced code is integrated into fairseq for reproducibility.", + "author": "Robin Schmidt; Telmo Pires; Stephan Peitz; Jonas L\u00f6\u00f6f", + "authorids": "/r/robin-schmidt/; /t/telmo-pires/; /s/stephan-peitz/; /j/jonas-loof/", + "bibtex": "@inproceedings{schmidt-etal-2022-non,\n title = \"Non-Autoregressive Neural Machine Translation: A Call for Clarity\",\n author = {Schmidt, Robin and\n Pires, Telmo and\n Peitz, Stephan and\n L{\\\"o}{\\\"o}f, Jonas},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.179/\",\n doi = \"10.18653/v1/2022.emnlp-main.179\",\n pages = \"2785--2799\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.179.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.179/", + "pdf_size": 672825, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10409850692631214695&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Apple; Apple; Apple; Apple", + "aff_domain": "apple.com;apple.com;apple.com;apple.com", + "email": "apple.com;apple.com;apple.com;apple.com", + "github": "https://github.com/facebookresearch/fairseq/pull/4431", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Apple Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.apple.com", + "aff_unique_abbr": "Apple", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.21", + "title": "Non-Parametric Domain Adaptation for End-to-End Speech Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The 
end-to-end speech translation (E2E-ST) has received increasing attention due to the potential of its less error propagation, lower latency and fewer parameters. However, the effectiveness of neural-based approaches to this task is severely limited by the available training corpus, especially for domain adaptation where in-domain triplet data is scarce or nonexistent. In this paper, we propose a novel non-parametric method that leverages in-domain text translation corpus to achieve domain adaptation for E2E-ST systems. To this end, we first incorporate an additional encoder into the pre-trained E2E-ST model to realize text translation modeling, based on which the decoder\u2019s output representations for text and speech translation tasks are unified by reducing the correspondent representation mismatch in available triplet training data. During domain adaptation, a k-nearest-neighbor (kNN) classifier is introduced to produce the final translation distribution using the external datastore built by the domain-specific text translation corpus, while the universal output representation is adopted to perform a similarity search. 
Experiments on the Europarl-ST benchmark demonstrate that when in-domain text translation data is involved only, our proposed approach significantly improves baseline by 12.82 BLEU on average in all translation directions, even outperforming the strong in-domain fine-tuning strategy.", + "author": "Yichao Du; Weizhi Wang; Zhirui Zhang; Boxing Chen; Tong Xu; Jun Xie; Enhong Chen", + "authorids": "/y/yichao-du/; /w/weizhi-wang/; /z/zhirui-zhang/; /b/boxing-chen/; /t/tong-xu/; /j/jun-xie/; /e/enhong-chen/", + "bibtex": "@inproceedings{du-etal-2022-non,\n title = \"Non-Parametric Domain Adaptation for End-to-End Speech Translation\",\n author = \"Du, Yichao and\n Wang, Weizhi and\n Zhang, Zhirui and\n Chen, Boxing and\n Xu, Tong and\n Xie, Jun and\n Chen, Enhong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.21/\",\n doi = \"10.18653/v1/2022.emnlp-main.21\",\n pages = \"306--320\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.21.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.21/", + "pdf_size": 993536, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14528533073021966772&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; University of California, Santa Barbara; Tencent AI Lab; Machine Intelligence Technology Lab, Alibaba DAMO Academy; University of Science and Technology of China+State Key Laboratory of Cognitive Intelligence; Machine Intelligence Technology Lab, Alibaba DAMO Academy; University of Science and Technology of China+State Key Laboratory of Cognitive 
Intelligence+Tencent AI Lab", + "aff_domain": "mail.ustc.edu.cn;ucsb.edu;gmail.com;alibaba-inc.com;ustc.edu.cn;alibaba-inc.com;ustc.edu.cn", + "email": "mail.ustc.edu.cn;ucsb.edu;gmail.com;alibaba-inc.com;ustc.edu.cn;alibaba-inc.com;ustc.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;2;3;4;0+1;4;0+1+3", + "aff_unique_norm": "University of Science and Technology of China;State Key Laboratory of Cognitive Intelligence;University of California, Santa Barbara;Tencent;Alibaba DAMO Academy", + "aff_unique_dep": ";;;Tencent AI Lab;Machine Intelligence Technology Lab", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.ucsb.edu;https://ai.tencent.com;https://damo.alibaba.com", + "aff_unique_abbr": "USTC;;UCSB;Tencent AI Lab;Alibaba DAMO", + "aff_campus_unique_index": ";1;;", + "aff_campus_unique": ";Santa Barbara", + "aff_country_unique_index": "0+0;1;0;0;0+0;0;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.363", + "title": "Norm-based Noisy Corpora Filtering and Refurbishing in Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent advances in neural machine translation depend on massive parallel corpora, which are collected from any open source without much guarantee of quality. It stresses the need for noisy corpora filtering, but existing methods are insufficient to solve this issue. They spend much time ensembling multiple scorers trained on clean bitexts, unavailable for low-resource languages in practice. In this paper, we propose a norm-based noisy corpora filtering and refurbishing method with no external data and costly scorers. The noisy and clean samples are separated based on how much information from the source and target sides the model requires to fit the given translation. For the unparallel sentence, the target-side history translation is much more important than the source context, contrary to the parallel ones. 
The amount of these two information flows can be measured by norms of source-/target-side context vectors. Moreover, we propose to reuse the discovered noisy data by generating pseudo labels via online knowledge distillation. Extensive experiments show that our proposed filtering method performs comparably with state-of-the-art noisy corpora filtering techniques but is more efficient and easier to operate. Noisy sample refurbishing further enhances the performance by making the most of the given data.", + "author": "Yu Lu; Jiajun Zhang", + "authorids": "/y/yu-lu/; /j/jiajun-zhang/", + "bibtex": "@inproceedings{lu-zhang-2022-norm,\n title = \"Norm-based Noisy Corpora Filtering and Refurbishing in Neural Machine Translation\",\n author = \"Lu, Yu and\n Zhang, Jiajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.363/\",\n doi = \"10.18653/v1/2022.emnlp-main.363\",\n pages = \"5414--5425\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.363.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.363/", + "pdf_size": 1009306, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5698807352375909041&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 0, + "aff": "National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China; National Laboratory of Pattern Recognition, Institute of Automation, CAS, Beijing, China+School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China", + "aff_domain": "nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "email": 
"nlpr.ia.ac.cn;nlpr.ia.ac.cn", + "github": "https://github.com/yulu-dada/Norm_NoisyFiltering", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "National Laboratory of Pattern Recognition;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Automation;School of Artificial Intelligence", + "aff_unique_url": ";http://www.ucas.ac.cn", + "aff_unique_abbr": ";UCAS", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.17", + "title": "Normalized Contrastive Learning for Text-Video Retrieval", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Cross-modal contrastive learning has led the recent advances in multimodal retrieval with its simplicity and effectiveness. In this work, however, we reveal that cross-modal contrastive learning suffers from incorrect normalization of the sum retrieval probabilities of each text or video instance. Specifically, we show that many test instances are either over- or under-represented during retrieval, significantly hurting the retrieval performance. To address this problem, we propose Normalized Contrastive Learning (NCL) which utilizes the Sinkhorn-Knopp algorithm to compute the instance-wise biases that properly normalize the sum retrieval probabilities of each instance so that every text and video instance is fairly represented during cross-modal retrieval. 
Empirical study shows that NCL brings consistent and significant gains in text-video retrieval on different model architectures, with new state-of-the-art multimodal retrieval metrics on the ActivityNet, MSVD, and MSR-VTT datasets without any architecture engineering.", + "author": "Yookoon Park; Mahmoud Azab; Seungwhan Moon; Bo Xiong; Florian Metze; Gourab Kundu; Kirmani Ahmed", + "authorids": "/y/yookoon-park/; /m/mahmoud-azab/; /s/seungwhan-moon/; /b/bo-xiong/; /f/florian-metze/; /g/gourab-kundu/; /k/kirmani-ahmed/", + "bibtex": "@inproceedings{park-etal-2022-normalized,\n title = \"Normalized Contrastive Learning for Text-Video Retrieval\",\n author = \"Park, Yookoon and\n Azab, Mahmoud and\n Moon, Seungwhan and\n Xiong, Bo and\n Metze, Florian and\n Kundu, Gourab and\n Ahmed, Kirmani\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.17/\",\n doi = \"10.18653/v1/2022.emnlp-main.17\",\n pages = \"248--260\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.17.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.17/", + "pdf_size": 2380387, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10323539493702650955&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Columbia University; Meta; Meta; Meta; Meta; Meta; Meta", + "aff_domain": "columbia.edu;fb.com;fb.com;fb.com;fb.com;fb.com;fb.com", + "email": "columbia.edu;fb.com;fb.com;fb.com;fb.com;fb.com;fb.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;1;1;1", + "aff_unique_norm": "Columbia University;Meta Platforms, Inc.", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.columbia.edu;https://meta.com", + "aff_unique_abbr": "Columbia;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.547", + "title": "Normalizing Mutual Information for Robust Adaptive Training for Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite the success of neural machine translation models, tensions between fluency of optimizing target language modeling and source-faithfulness remain as challenges. Previously, Conditional Bilingual Mutual Information (CBMI), a scoring metric for the importance of target sentences and tokens, was proposed to encourage fluent and faithful translations. The score is obtained by combining the probability from the translation model and the target language model, which is then used to assign different weights to losses from sentences and tokens. Meanwhile, we argue this metric is not properly normalized, for which we propose Normalized Pointwise Mutual Information (NPMI). NPMI utilizes an additional language model on source language to approximate the joint likelihood of source-target pair and the likelihood of the source, which is then used for normalizing the score. 
We showed that NPMI better captures the dependence between source-target and that NPMI-based token-level adaptive training brings improvements over baselines with empirical results from En-De, De-En, and En-Ro translation tasks.", + "author": "Youngwon Lee; Changmin Lee; Hojin Lee; Seung-won Hwang", + "authorids": "/y/youngwon-lee/; /c/changmin-lee/; /h/hojin-lee/; /s/seung-won-hwang/", + "bibtex": "@inproceedings{lee-etal-2022-normalizing,\n title = \"Normalizing Mutual Information for Robust Adaptive Training for Translation\",\n author = \"Lee, Youngwon and\n Lee, Changmin and\n Lee, Hojin and\n Hwang, Seung-won\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.547/\",\n doi = \"10.18653/v1/2022.emnlp-main.547\",\n pages = \"8008--8015\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.547.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.547/", + "pdf_size": 303238, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1825644909215744333&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Seoul National University; Kakao Enterprise, South Korea; Kakao Enterprise, South Korea; Seoul National University", + "aff_domain": "snu.ac.kr;kakaoenterprise.com;kakaoenterprise.com;snu.ac.kr", + "email": "snu.ac.kr;kakaoenterprise.com;kakaoenterprise.com;snu.ac.kr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "Seoul National University;Kakao Enterprise", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.snu.ac.kr;https://enterprise.kakao.com", + "aff_unique_abbr": "SNU;Kakao Enterprise", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.489", + "title": "Not All Errors are Equal: Learning Text Generation Metrics using Stratified Error Synthesis", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Is it possible to build a general and automatic natural language generation (NLG) evaluation metric? Existing learned metrics either perform unsatisfactorily or are restricted to tasks where large human rating data is already available. We introduce SESCORE, a model-based metric that is highly correlated with human judgements without requiring human annotation, by utilizing a novel, iterative error synthesis and severity scoring pipeline. This pipeline applies a series of plausible errors to raw text and assigns severity labels by simulating human judgements with entailment. We evaluate SESCORE against existing metrics by comparing how their scores correlate with human ratings. SESCORE outperforms all prior unsupervised metrics on multiple diverse NLG tasks including machine translation, image captioning, and WebNLG text generation. For WMT 20/21 En-De and Zh-En, SESCORE improves the average Kendall correlation with human judgement from 0.154 to 0.195. 
SESCORE even achieves comparable performance to the best supervised metric COMET, despite receiving no human annotated training data.", + "author": "Wenda Xu; Yi-Lin Tuan; Yujie Lu; Michael Saxon; Lei Li; William Yang Wang", + "authorids": "/w/wenda-xu/; /y/yi-lin-tuan/; /y/yujie-lu/; /m/michael-saxon/; /l/lei-li/; /w/william-yang-wang/", + "bibtex": "@inproceedings{xu-etal-2022-errors,\n title = \"Not All Errors are Equal: Learning Text Generation Metrics using Stratified Error Synthesis\",\n author = \"Xu, Wenda and\n Tuan, Yi-Lin and\n Lu, Yujie and\n Saxon, Michael and\n Li, Lei and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.489/\",\n doi = \"10.18653/v1/2022.findings-emnlp.489\",\n pages = \"6559--6574\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.489.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.489/", + "pdf_size": 489510, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5231178565768540017&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "UC Santa Barbara; UC Santa Barbara; UC Santa Barbara; UC Santa Barbara; UC Santa Barbara; UC Santa Barbara", + "aff_domain": "cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu;cs.ucsb.edu", + "github": "https://github.com/xu1998hz/SEScorer", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of California, Santa Barbara", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsb.edu", + "aff_unique_abbr": "UCSB", + "aff_campus_unique_index": 
"0;0;0;0;0;0", + "aff_campus_unique": "Santa Barbara", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.140", + "title": "Not Just Plain Text! Fuel Document-Level Relation Extraction with Explicit Syntax Refinement and Subsentence Modeling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Document-level relation extraction (DocRE) aims to identify semantic labels among entities within a single document. One major challenge of DocRE is to dig decisive details regarding a specific entity pair from long text. However, in many cases, only a fraction of text carries required information, even in the manually labeled supporting evidence. To better capture and exploit instructive information, we propose a novel expLicit syntAx Refinement and Subsentence mOdeliNg based framework (LARSON). By introducing extra syntactic information, LARSON can model subsentences of arbitrary granularity and efficiently screen instructive ones. Moreover, we incorporate refined syntax into text representations which further improves the performance of LARSON. Experimental results on three benchmark datasets (DocRED, CDR, and GDA) demonstrate that LARSON significantly outperforms existing methods.", + "author": "Zhichao Duan; Xiuxing Li; Zhenyu Li; Zhuo Wang; Jianyong Wang", + "authorids": "/z/zhichao-duan/; /x/xiuxing-li/; /z/zhenyu-li/; /z/zhuo-wang/; /j/jianyong-wang/", + "bibtex": "@inproceedings{duan-etal-2022-just,\n title = \"Not Just Plain Text! 
Fuel Document-Level Relation Extraction with Explicit Syntax Refinement and Subsentence Modeling\",\n author = \"Duan, Zhichao and\n Li, Xiuxing and\n Li, Zhenyu and\n Wang, Zhuo and\n Wang, Jianyong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.140/\",\n doi = \"10.18653/v1/2022.findings-emnlp.140\",\n pages = \"1941--1951\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.140.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.140/", + "pdf_size": 932321, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2514919654397898101&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, Tsinghua University; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University; Department of Computer Science and Technology, Tsinghua University", + "aff_domain": "mails.tsinghua.edu.cn;ict.ac.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;ict.ac.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;0;0;0", + "aff_unique_norm": "Tsinghua University;Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Department of Computer Science and Technology;Institute of Computing Technology;", + "aff_unique_url": 
"https://www.tsinghua.edu.cn;http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "THU;CAS;UCAS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.247", + "title": "Not to Overfit or Underfit the Source Domains? An Empirical Study of Domain Generalization in Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Machine learning models are prone to overfitting their training (source) domains, which is commonly believed to be the reason why they falter in novel target domains. Here we examine the contrasting view that multi-source domain generalization (DG) is first and foremost a problem of mitigating source domain underfitting: models not adequately learning the signal already present in their multi-domain training data. Experiments on a reading comprehension DG benchmark show that as a model learns its source domains better\u2014using familiar methods such as knowledge distillation (KD) from a bigger model\u2014its zero-shot out-of-domain utility improves at an even faster pace. Improved source domain learning also demonstrates superior out-of-domain generalization over three popular existing DG approaches that aim to limit overfitting. Our implementation of KD-based domain generalization is available via PrimeQA at: https://ibm.biz/domain-generalization-with-kd.", + "author": "Md Arafat Sultan; Avi Sil; Radu Florian", + "authorids": "/m/md-arafat-sultan/; /a/avirup-sil/; /r/radu-florian/", + "bibtex": "@inproceedings{sultan-etal-2022-overfit,\n title = \"Not to Overfit or Underfit the Source Domains? 
An Empirical Study of Domain Generalization in Question Answering\",\n author = \"Sultan, Md Arafat and\n Sil, Avi and\n Florian, Radu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.247/\",\n doi = \"10.18653/v1/2022.emnlp-main.247\",\n pages = \"3752--3761\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.247.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.247/", + "pdf_size": 686618, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9475019556192113436&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "IBM Research AI; IBM Research AI; IBM Research AI", + "aff_domain": "ibm.com;us.ibm.com;us.ibm.com", + "email": "ibm.com;us.ibm.com;us.ibm.com", + "github": "", + "project": "https://ibm.biz/domain-generalization-with-kd", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "AI", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.91", + "title": "Numerical Optimizations for Weighted Low-rank Estimation on Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Singular value decomposition (SVD) is one of the most popular compression methods that approximate a target matrix with smaller matrices. However, standard SVD treats the parameters within the matrix with equal importance, which is a simple but unrealistic assumption. 
The parameters of a trained neural network model may affect the task performance unevenly, which suggests non-equal importance among the parameters. Compared to SVD, the decomposition method aware of parameter importance is the more practical choice in real cases. Unlike standard SVD, weighted value decomposition is a non-convex optimization problem that lacks a closed-form solution. We systematically investigated multiple optimization strategies to tackle the problem and examined our method by compressing Transformer-based language models. Further, we designed a metric to predict when the SVD may introduce a significant performance drop, for which our method can be a rescue strategy. The extensive evaluations demonstrate that our method can perform better than current SOTA methods in compressing Transformer-based language models.", + "author": "Ting Hua; Yen-Chang Hsu; Felicity Wang; Qian Lou; Yilin Shen; Hongxia Jin", + "authorids": "/t/ting-hua/; /y/yen-chang-hsu/; /f/felicity-wang/; /q/qian-lou/; /y/yilin-shen/; /h/hongxia-jin/", + "bibtex": "@inproceedings{hua-etal-2022-numerical,\n title = \"Numerical Optimizations for Weighted Low-rank Estimation on Language Models\",\n author = \"Hua, Ting and\n Hsu, Yen-Chang and\n Wang, Felicity and\n Lou, Qian and\n Shen, Yilin and\n Jin, Hongxia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.91/\",\n doi = \"10.18653/v1/2022.emnlp-main.91\",\n pages = \"1404--1416\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.91.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.91/", + "pdf_size": 778654, + "gs_citation": 14, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=12135235880904262402&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Samsung Research America; Samsung Research America; Samsung Research America; Samsung Research America; Samsung Research America; Samsung Research America", + "aff_domain": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com", + "email": "samsung.com;samsung.com;samsung.com;samsung.com;samsung.com;samsung.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Samsung Research America", + "aff_unique_dep": "", + "aff_unique_url": "https://www.samsung.com/us/careers/research/", + "aff_unique_abbr": "SRA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.377", + "title": "OTSeq2Set: An Optimal Transport Enhanced Sequence-to-Set Model for Extreme Multi-label Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Extreme multi-label text classification (XMTC) is the task of finding the most relevant subset labels from an extremely large-scale label collection. Recently, some deep learning models have achieved state-of-the-art results in XMTC tasks. These models commonly predict scores for all labels by a fully connected layer as the last layer of the model. However, such models can\u2019t predict a relatively complete and variable-length label subset for each document, because they select positive labels relevant to the document by a fixed threshold or take top k labels in descending order of scores. A less popular type of deep learning models called sequence-to-sequence (Seq2Seq) focus on predicting variable-length positive labels in sequence style. 
However, the labels in XMTC tasks are essentially an unordered set rather than an ordered sequence, the default order of labels restrains Seq2Seq models in training. To address this limitation in Seq2Seq, we propose an autoregressive sequence-to-set model for XMTC tasks named OTSeq2Set. Our model generates predictions in student-forcing scheme and is trained by a loss function based on bipartite matching which enables permutation-invariance. Meanwhile, we use the optimal transport distance as a measurement to force the model to focus on the closest labels in semantic label space. Experiments show that OTSeq2Set outperforms other competitive baselines on 4 benchmark datasets. Especially, on the Wikipedia dataset with 31k labels, it outperforms the state-of-the-art Seq2Seq method by 16.34% in micro-F1 score. The code is available at https://github.com/caojie54/OTSeq2Set.", + "author": "Jie Cao; Yin Zhang", + "authorids": "/j/jie-cao/; /y/yin-zhang/", + "bibtex": "@inproceedings{cao-zhang-2022-otseq2set,\n title = \"{OTS}eq2{S}et: An Optimal Transport Enhanced Sequence-to-Set Model for Extreme Multi-label Text Classification\",\n author = \"Cao, Jie and\n Zhang, Yin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.377/\",\n doi = \"10.18653/v1/2022.emnlp-main.377\",\n pages = \"5588--5597\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.377.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.377/", + "pdf_size": 1623001, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2296463604352619218&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Polytechnic Institute, Zhejiang University; 
College of Computer Science and Technology, Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn", + "github": "https://github.com/caojie54/OTSeq2Set", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Zhejiang University", + "aff_unique_dep": "Polytechnic Institute", + "aff_unique_url": "http://www.zju.edu.cn", + "aff_unique_abbr": "ZJU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Hangzhou;", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.818", + "title": "Offer a Different Perspective: Modeling the Belief Alignment of Arguments in Multi-party Debates", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In contexts where debate and deliberation are the norm, the participants are regularly presented with new information that conflicts with their original beliefs. When required to update their beliefs (belief alignment), they may choose arguments that align with their worldview (confirmation bias). We test this and competing hypotheses in a constraint-based modeling approach to predict the winning arguments in multi-party interactions in the Reddit Change My View and Intelligence Squared debates datasets. We adopt a hierarchical generative Variational Autoencoder as our model and impose structural constraints that reflect competing hypotheses about the nature of argumentation. 
Our findings suggest that in most settings, predictive models that anticipate winning arguments to be further from the initial argument of the opinion holder are more likely to succeed.", + "author": "Suzanna Sia; Kokil Jaidka; Hansin Ahuja; Niyati Chhaya; Kevin Duh", + "authorids": "/s/suzanna-sia/; /k/kokil-jaidka/; /h/hansin-ahuja/; /n/niyati-chhaya/; /k/kevin-duh/", + "bibtex": "@inproceedings{sia-etal-2022-offer,\n title = \"Offer a Different Perspective: Modeling the Belief Alignment of Arguments in Multi-party Debates\",\n author = \"Sia, Suzanna and\n Jaidka, Kokil and\n Ahuja, Hansin and\n Chhaya, Niyati and\n Duh, Kevin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.818/\",\n doi = \"10.18653/v1/2022.emnlp-main.818\",\n pages = \"11939--11950\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.818.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.818/", + "pdf_size": 907069, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4893920296811093776&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Johns Hopkins University; National University of Singapore; IIT Ropar; Adobe Research India; Johns Hopkins University", + "aff_domain": "jhu.edu;nus.edu.sg;gmail.com;adobe.com;cs.jhu.edu", + "email": "jhu.edu;nus.edu.sg;gmail.com;adobe.com;cs.jhu.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;0", + "aff_unique_norm": "Johns Hopkins University;National University of Singapore;Indian Institute of Technology Ropar;Adobe Research", + "aff_unique_dep": ";;;Adobe Research", + "aff_unique_url": 
"https://www.jhu.edu;https://www.nus.edu.sg;https://www.iitrpr.ac.in;https://research.adobe.com", + "aff_unique_abbr": "JHU;NUS;IIT Ropar;Adobe Research India", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Ropar", + "aff_country_unique_index": "0;1;2;2;0", + "aff_country_unique": "United States;Singapore;India" + }, + { + "id": "2022.findings-emnlp.194", + "title": "On Advances in Text Generation from Images Beyond Captioning: A Case Study in Self-Rationalization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Combining the visual modality with pretrained language models has been surprisingly effective for simple descriptive tasks such as image captioning. More general text generation however remains elusive. We take a step back and ask: How do these models work for more complex generative tasks, i.e. conditioning on both text and images? Are multimodal models simply visually adapted language models, or do they combine and reason jointly over modalities? We investigate these questions in the context of self-rationalization (jointly generating task labels/answers and free-text explanations) of three tasks: (i) visual question answering in VQA-X, (ii) visual commonsense reasoning in VCR, and (iii) visual-textual entailment in E-SNLI-VE. We show that recent unimodal advances, CLIP image representations and scaling of language models, do not consistently improve self-rationalization in multimodal tasks. We find that no single model type works universally best across tasks, datasets, and finetuning data sizes. 
Our findings motivate the need for novel general backbones that move text generation from images and text beyond image captioning.", + "author": "Shruti Palaskar; Akshita Bhagia; Yonatan Bisk; Florian Metze; Alan W Black; Ana Marasovic", + "authorids": "/s/shruti-palaskar/; /a/akshita-bhagia/; /y/yonatan-bisk/; /f/florian-metze/; /a/alan-w-black/; /a/ana-marasovic/", + "bibtex": "@inproceedings{palaskar-etal-2022-advances,\n title = \"On Advances in Text Generation from Images Beyond Captioning: A Case Study in Self-Rationalization\",\n author = \"Palaskar, Shruti and\n Bhagia, Akshita and\n Bisk, Yonatan and\n Metze, Florian and\n Black, Alan W and\n Marasovic, Ana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.194/\",\n doi = \"10.18653/v1/2022.findings-emnlp.194\",\n pages = \"2644--2657\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.194.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.194/", + "pdf_size": 1882773, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18178002676172931167&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Carnegie Mellon University; Allen Institute for AI; Carnegie Mellon University; Carnegie Mellon University; Carnegie Mellon University; University of Utah + Allen Institute for AI", + "aff_domain": "cs.cmu.edu;allenai.org;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;utah.edu", + "email": "cs.cmu.edu;allenai.org;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;utah.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0;2+1", + "aff_unique_norm": "Carnegie Mellon University;Allen Institute for AI;University of Utah", + "aff_unique_dep": 
";;", + "aff_unique_url": "https://www.cmu.edu;https://allenai.org;https://www.utah.edu", + "aff_unique_abbr": "CMU;AI2;Utah", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.262", + "title": "On Measuring the Intrinsic Few-Shot Hardness of Datasets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While advances in pre-training have led to dramatic improvements in few-shot learning of NLP tasks, there is limited understanding of what drives successful few-shot adaptation in datasets. In particular, given a new dataset and a pre-trained model, what properties of the dataset make it few-shot learnable, and are these properties independent of the specific adaptation techniques used? We consider an extensive set of recent few-shot learning methods and show that their performance across a large number of datasets is highly correlated, showing that few-shot hardness may be intrinsic to datasets, for a given pre-trained model. To estimate intrinsic few-shot hardness, we then propose a simple and lightweight metric called Spread that captures the intuition that few-shot learning is made possible by exploiting feature-space invariances between training and test samples. 
Our metric better accounts for few-shot hardness compared to existing notions of hardness and is ~8-100x faster to compute.", + "author": "Xinran Zhao; Shikhar Murty; Christopher Manning", + "authorids": "/x/xinran-zhao/; /s/shikhar-murty/; /c/christopher-d-manning/", + "bibtex": "@inproceedings{zhao-etal-2022-measuring,\n title = \"On Measuring the Intrinsic Few-Shot Hardness of Datasets\",\n author = \"Zhao, Xinran and\n Murty, Shikhar and\n Manning, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.262/\",\n doi = \"10.18653/v1/2022.emnlp-main.262\",\n pages = \"3955--3963\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.262.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.262/", + "pdf_size": 393662, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11125623238990496233&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff": "Computer Science Department, Stanford University; Computer Science Department, Stanford University; Computer Science Department, Stanford University", + "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.607", + "title": "On 
Parsing as Tagging", + "track": "main", + "status": "Main", + "award": false, + "abstract": "There are many proposals to reduce constituency parsing to tagging. To figure out what these approaches have in common, we offer a unifying pipeline, which consists of three steps: linearization, learning, and decoding. We prove that classic shift\u2013reduce parsing can be reduced to tetratagging\u2014the state-of-the-art constituency tagger\u2014under two assumptions: right-corner transformation in the linearization step and factored scoring in the learning step. We ask what is the most critical factor that makes parsing-as-tagging methods accurate while being efficient. To answer this question, we empirically evaluate a taxonomy of tagging pipelines with different choices of linearizers, learners, and decoders. Based on the results in English as well as a set of 8 typologically diverse languages, we conclude that the linearization of the derivation tree and its alignment with the input sequence is the most critical factor in achieving accurate parsers as taggers.", + "author": "Afra Amini; Ryan Cotterell", + "authorids": "/a/afra-amini/; /r/ryan-cotterell/", + "bibtex": "@inproceedings{amini-cotterell-2022-parsing,\n title = \"On Parsing as Tagging\",\n author = \"Amini, Afra and\n Cotterell, Ryan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.607/\",\n doi = \"10.18653/v1/2022.emnlp-main.607\",\n pages = \"8884--8900\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.607.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.607/", + "pdf_size": 740845, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7369791153794262426&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 5, + "aff": "ETH Zurich; ETH Zurich", + "aff_domain": "inf.ethz.ch;inf.ethz.ch", + "email": "inf.ethz.ch;inf.ethz.ch", + "github": "https://github.com/rycolab/parsing-as-tagging", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "ETH Zurich", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ethz.ch", + "aff_unique_abbr": "ETHZ", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.findings-emnlp.283", + "title": "On Utilizing Constituent Language Resources to Improve Downstream Tasks in Hinglish", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Performance of downstream NLP tasks on code-switched Hindi-English (aka ) continues to remain a significant challenge. Intuitively, Hindi and English corpora should aid improve task performance on Hinglish. We show that meta-learning framework can effectively utilize the the labelled resources of the downstream tasks in the constituent languages. The proposed approach improves the performance on downstream tasks on code-switched language. 
We experiment with code-switching benchmark GLUECoS and report significant improvements.", + "author": "Vishwajeet Kumar; Rudra Murthy; Tejas Dhamecha", + "authorids": "/v/vishwajeet-kumar/; /r/rudra-murthy/; /t/tejas-dhamecha/", + "bibtex": "@inproceedings{kumar-etal-2022-utilizing,\n title = \"On Utilizing Constituent Language Resources to Improve Downstream Tasks in {H}inglish\",\n author = \"Kumar, Vishwajeet and\n Murthy, Rudra and\n Dhamecha, Tejas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.283/\",\n doi = \"10.18653/v1/2022.findings-emnlp.283\",\n pages = \"3859--3865\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.283.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.283/", + "pdf_size": 230151, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:ni0RaF1V4SYJ:scholar.google.com/&scioq=On+Utilizing+Constituent+Language+Resources+to+Improve+Downstream+Tasks+in+Hinglish&hl=en&as_sdt=0,33", + "gs_version_total": 3, + "aff": "IBM Research, India; IBM Research, India; IBM Research, India", + "aff_domain": "in.ibm.com;in.ibm.com;in.ibm.com", + "email": "in.ibm.com;in.ibm.com;in.ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.290", + "title": "On the Calibration of Massively Multilingual Language Models", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "Massively Multilingual Language Models (MMLMs) have recently gained popularity due to their surprising effectiveness in cross-lingual transfer. While there has been much work in evaluating these models for their performance on a variety of tasks and languages, little attention has been paid on how well calibrated these models are with respect to the confidence in their predictions. We first investigate the calibration of MMLMs in the zero-shot setting and observe a clear case of miscalibration in low-resource languages or those which are typologically diverse from English. Next, we empirically show that calibration methods like temperature scaling and label smoothing do reasonably well in improving calibration in the zero-shot scenario. We also find that few-shot examples in the language can further help reduce calibration errors, often substantially. Overall, our work contributes towards building more reliable multilingual models by highlighting the issue of their miscalibration, understanding what language and model-specific factors influence it, and pointing out the strategies to improve the same.", + "author": "Kabir Ahuja; Sunayana Sitaram; Sandipan Dandapat; Monojit Choudhury", + "authorids": "/k/kabir-ahuja/; /s/sunayana-sitaram/; /s/sandipan-dandapat/; /m/monojit-choudhury/", + "bibtex": "@inproceedings{ahuja-etal-2022-calibration,\n title = \"On the Calibration of Massively Multilingual Language Models\",\n author = \"Ahuja, Kabir and\n Sitaram, Sunayana and\n Dandapat, Sandipan and\n Choudhury, Monojit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.290/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.290\",\n pages = \"4310--4323\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.290.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.290/", + "pdf_size": 579322, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9108191743599219466&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Microsoft Research, India; Microsoft Research, India; Microsoft R&D, India; Microsoft R&D, India", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "Microsoft Research;Microsoft", + "aff_unique_dep": ";R&D", + "aff_unique_url": "https://www.microsoft.com/en-us/research/group/india.aspx;https://www.microsoft.com", + "aff_unique_abbr": "MSR India;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.findings-emnlp.190", + "title": "On the Curious Case of l2 norm of Sense Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We show that the l2 norm of a static sense embedding encodes information related to the frequency of that sense in the training corpus used to learn the sense embeddings. This finding can be seen as an extension of a previously known relationship for word embeddings to sense embeddings. Our experimental results show that in spite of its simplicity, the l2 norm of sense embeddings is a surprisingly effective feature for several word sense related tasks such as (a) most frequent sense prediction, (b) word-in-context (WiC), and (c) word sense disambiguation (WSD). 
In particular, by simply including the l2 norm of a sense embedding as a feature in a classifier, we show that we can improve WiC and WSD methods that use static sense embeddings.", + "author": "Yi Zhou; Danushka Bollegala", + "authorids": "/y/yi-zhou/; /d/danushka-bollegala/", + "bibtex": "@inproceedings{zhou-bollegala-2022-curious,\n title = \"On the Curious Case of l2 norm of Sense Embeddings\",\n author = \"Zhou, Yi and\n Bollegala, Danushka\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.190/\",\n doi = \"10.18653/v1/2022.findings-emnlp.190\",\n pages = \"2593--2602\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.190.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.190/", + "pdf_size": 1714644, + "gs_citation": -1, + "gs_cited_by_link": "", + "gs_version_total": 0, + "aff": "University of Liverpool; University of Liverpool + Amazon", + "aff_domain": "liverpool.ac.uk;liverpool.ac.uk", + "email": "liverpool.ac.uk;liverpool.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "University of Liverpool;Amazon.com, Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.liverpool.ac.uk;https://www.amazon.com", + "aff_unique_abbr": "Liv Uni;Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.108", + "title": "On the Effectiveness of Automated Metrics for Text Generation Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A major challenge in the field of Text Generation is evaluation, 
because we lack a sound theory that can be leveraged to extract guidelines for evaluation campaigns. In this work, we propose a first step towards such a theory that incorporates different sources of uncertainty, such as imperfect automated metrics and insufficiently sized test sets. The theory has practical applications, such as determining the number of samples needed to reliably distinguish the performance of a set of Text Generation systems in a given setting. We showcase the application of the theory on the WMT 21 and Spot-The-Bot evaluation data and outline how it can be leveraged to improve the evaluation protocol regarding the reliability, robustness, and significance of the evaluation outcome.", + "author": "Pius von D\u00e4niken; Jan Deriu; Don Tuggener; Mark Cieliebak", + "authorids": "/p/pius-von-daniken/; /j/jan-milan-deriu/; /d/don-tuggener/; /m/mark-cieliebak/", + "bibtex": "@inproceedings{von-daniken-etal-2022-effectiveness,\n title = \"On the Effectiveness of Automated Metrics for Text Generation Systems\",\n author = {von D{\\\"a}niken, Pius and\n Deriu, Jan and\n Tuggener, Don and\n Cieliebak, Mark},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.108/\",\n doi = \"10.18653/v1/2022.findings-emnlp.108\",\n pages = \"1503--1522\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.108.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.108/", + "pdf_size": 653824, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8103406921887771934&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Centre for Artificial Intelligence, ZHAW School of Engineering; Centre for Artificial 
Intelligence, ZHAW School of Engineering; Centre for Artificial Intelligence, ZHAW School of Engineering; Centre for Artificial Intelligence, ZHAW School of Engineering", + "aff_domain": "zhaw.ch;zhaw.ch;zhaw.ch;zhaw.ch", + "email": "zhaw.ch;zhaw.ch;zhaw.ch;zhaw.ch", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "ZHAW School of Engineering", + "aff_unique_dep": "Centre for Artificial Intelligence", + "aff_unique_url": "https://www.zhaw.ch/en", + "aff_unique_abbr": "ZHAW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.emnlp-main.208", + "title": "On the Evaluation Metrics for Paraphrase Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper we revisit automatic metrics for paraphrase evaluation and obtain two findings that disobey conventional wisdom: (1) Reference-free metrics achieve better performance than their reference-based counterparts. (2) Most commonly used metrics do not align well with human annotation.Underlying reasons behind the above findings are explored through additional experiments and in-depth analyses.Based on the experiments and analyses, we propose ParaScore, a new evaluation metric for paraphrase generation. It possesses the merits of reference-based and reference-free metrics and explicitly models lexical divergence. 
Based on our analysis and improvements, our proposed reference-based outperforms than reference-free metrics.Experimental results demonstrate that ParaScore significantly outperforms existing metrics.", + "author": "Lingfeng Shen; Lemao Liu; Haiyun Jiang; Shuming Shi", + "authorids": "/l/lingfeng-shen/; /l/lemao-liu/; /h/haiyun-jiang/; /s/shuming-shi/", + "bibtex": "@inproceedings{shen-etal-2022-evaluation,\n title = \"On the Evaluation Metrics for Paraphrase Generation\",\n author = \"Shen, Lingfeng and\n Liu, Lemao and\n Jiang, Haiyun and\n Shi, Shuming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.208/\",\n doi = \"10.18653/v1/2022.emnlp-main.208\",\n pages = \"3178--3190\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.208.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.208/", + "pdf_size": 1192470, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9273890572471723717&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, Johns Hopkins University; Natural Language Processing Center, Tencent AI Lab; Natural Language Processing Center, Tencent AI Lab; Natural Language Processing Center, Tencent AI Lab", + "aff_domain": "jh.edu;tencent.com;tencent.com;tencent.com", + "email": "jh.edu;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/shadowkiller33/ParaScore", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Johns Hopkins University;Tencent AI Lab", + "aff_unique_dep": "Department of Computer Science;Natural Language Processing Center", + "aff_unique_url": 
"https://www.jhu.edu;https://ailab.tencent.com", + "aff_unique_abbr": "JHU;Tencent AI Lab", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.298", + "title": "On the Impact of Temporal Concept Drift on Model Explanations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Explanation faithfulness of model predictions in natural language processing is typically evaluated on held-out data from the same temporal distribution as the training data (i.e. synchronous settings). While model performance often deteriorates due to temporal variation (i.e. temporal concept drift), it is currently unknown how explanation faithfulness is impacted when the time span of the target data is different from the data used to train the model (i.e. asynchronous settings). For this purpose, we examine the impact of temporal variation on model explanations extracted by eight feature attribution methods and three select-then-predict models across six text classification tasks. Our experiments show that (i) faithfulness is not consistent under temporal variations across feature attribution methods (e.g. it decreases or increases depending on the method), with an attention-based method demonstrating the most robust faithfulness scores across datasets; and (ii) select-then-predict models are mostly robust in asynchronous settings with only small degradation in predictive performance. Finally, feature attribution methods show conflicting behavior when used in FRESH (i.e. a select-and-predict model) and for measuring sufficiency/comprehensiveness (i.e. as post-hoc methods), suggesting that we need more robust metrics to evaluate post-hoc explanation faithfulness. 
Code will be made publicly available.", + "author": "Zhixue Zhao; George Chrysostomou; Kalina Bontcheva; Nikolaos Aletras", + "authorids": "/z/zhixue-zhao/; /g/george-chrysostomou/; /k/kalina-bontcheva/; /n/nikolaos-aletras/", + "bibtex": "@inproceedings{zhao-etal-2022-impact,\n title = \"On the Impact of Temporal Concept Drift on Model Explanations\",\n author = \"Zhao, Zhixue and\n Chrysostomou, George and\n Bontcheva, Kalina and\n Aletras, Nikolaos\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.298/\",\n doi = \"10.18653/v1/2022.findings-emnlp.298\",\n pages = \"4039--4054\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.298.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.298/", + "pdf_size": 5078428, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5640674240289119421&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/casszhao/temporal-drift-on-explanation", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.753", + "title": "On the Limitations of Reference-Free Evaluations of Generated Text", + "track": "main", + "status": "Main", + "award": false, + "abstract": "There is significant interest in developing evaluation metrics which accurately estimate the quality of generated text without the aid of a human-written reference text, which can be time consuming and expensive to collect or entirely unavailable in online applications. 
However, in this work, we demonstrate that these reference-free metrics are inherently biased and limited in their ability to evaluate generated text, and we argue that they should not be used to measure progress on tasks like machine translation or summarization. We show how reference-free metrics are equivalent to using one generation model to evaluate another, which has several limitations: (1) the metrics can be optimized at test time to find the approximate best-possible output, (2) they are inherently biased toward models which are more similar to their own, and (3) they can be biased against higher-quality outputs, including those written by humans. Therefore, we recommend that reference-free metrics should be used as diagnostic tools for analyzing and understanding model behavior instead of measures of how well models perform a task, in which the goal is to achieve as high of a score as possible.", + "author": "Daniel Deutsch; Rotem Dror; Dan Roth", + "authorids": "/d/daniel-deutsch/; /r/rotem-dror/; /d/dan-roth/", + "bibtex": "@inproceedings{deutsch-etal-2022-limitations,\n title = \"On the Limitations of Reference-Free Evaluations of Generated Text\",\n author = \"Deutsch, Daniel and\n Dror, Rotem and\n Roth, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.753/\",\n doi = \"10.18653/v1/2022.emnlp-main.753\",\n pages = \"10960--10977\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.753.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.753/", + "pdf_size": 624326, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13995713577695112054&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 6, + "aff": "Google Research\u2020; University of Pennsylvania\u2021; University of Pennsylvania\u2021", + "aff_domain": "google.com;seas.upenn.edu;seas.upenn.edu", + "email": "google.com;seas.upenn.edu;seas.upenn.edu", + "github": "", + "project": "https://cogcomp.seas.upenn.edu/page/publication_view/991", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Google;University of Pennsylvania", + "aff_unique_dep": "Google Research;", + "aff_unique_url": "https://research.google;https://www.upenn.edu", + "aff_unique_abbr": "Google Research;UPenn", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Mountain View;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.293", + "title": "On the Role of Bidirectionality in Language Model Pre-Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prior work on language model pre-training has explored different architectures and learning objectives, but differences in data, hyperparameters and evaluation make a principled comparison difficult. In this work, we focus on bidirectionality as a key factor that differentiates existing approaches, and present a comprehensive study of its role in next token prediction, text infilling, zero-shot priming and fine-tuning. We propose a new framework that generalizes prior approaches, including fully unidirectional models like GPT, fully bidirectional models like BERT, and hybrid models like CM3 and prefix LM. Our framework distinguishes between two notions of bidirectionality (bidirectional context and bidirectional attention) and allows us to control each of them separately. We find that the optimal configuration is largely application-dependent (e.g., bidirectional attention is beneficial for fine-tuning and infilling, but harmful for next token prediction and zero-shot priming). 
We train models with up to 6.7B parameters, and find differences to remain consistent at scale. While prior work on scaling has focused on left-to-right autoregressive models, our results suggest that this approach comes with some trade-offs, and it might be worthwhile to develop very large bidirectional models.", + "author": "Mikel Artetxe; Jingfei Du; Naman Goyal; Luke Zettlemoyer; Veselin Stoyanov", + "authorids": "/m/mikel-artetxe/; /j/jingfei-du/; /n/naman-goyal/; /l/luke-zettlemoyer/; /v/veselin-stoyanov/", + "bibtex": "@inproceedings{artetxe-etal-2022-role,\n title = \"On the Role of Bidirectionality in Language Model Pre-Training\",\n author = \"Artetxe, Mikel and\n Du, Jingfei and\n Goyal, Naman and\n Zettlemoyer, Luke and\n Stoyanov, Veselin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.293/\",\n doi = \"10.18653/v1/2022.findings-emnlp.293\",\n pages = \"3973--3985\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.293.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.293/", + "pdf_size": 345575, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16270071382055563258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": "meta.com;meta.com;meta.com;meta.com;meta.com", + "email": "meta.com;meta.com;meta.com;meta.com;meta.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.97", + "title": "On the Transformation of Latent Space in Fine-Tuned NLP Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study the evolution of latent space in fine-tuned NLP models. Different from the commonly used probing-framework, we opt for an unsupervised method to analyze representations. More specifically, we discover latent concepts in the representational space using hierarchical clustering. We then use an alignment function to gauge the similarity between the latent space of a pre-trained model and its fine-tuned version. We use traditional linguistic concepts to facilitate our understanding and also study how the model space transforms towards task-specific information. We perform a thorough analysis, comparing pre-trained and fine-tuned models across three models and three downstream tasks. The notable findings of our work are: i) the latent space of the higher layers evolve towards task-specific concepts, ii) whereas the lower layers retain generic concepts acquired in the pre-trained model, iii) we discovered that some concepts in the higher layers acquire polarity towards the output class, and iv) that these concepts can be used for generating adversarial triggers.", + "author": "Nadir Durrani; Hassan Sajjad; Fahim Dalvi; Firoj Alam", + "authorids": "/n/nadir-durrani/; /h/hassan-sajjad/; /f/fahim-dalvi/; /f/firoj-alam/", + "bibtex": "@inproceedings{durrani-etal-2022-transformation,\n title = \"On the Transformation of Latent Space in Fine-Tuned {NLP} Models\",\n author = \"Durrani, Nadir and\n Sajjad, Hassan and\n Dalvi, Fahim and\n Alam, Firoj\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab 
Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.97/\",\n doi = \"10.18653/v1/2022.emnlp-main.97\",\n pages = \"1495--1516\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.97.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.97/", + "pdf_size": 6954312, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14591732012248464365&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Qatar Computing Research Institute, Hamad Bin Khalifa University, Qatar; Faculty of Computer Science, Dalhousie University, Canada; Qatar Computing Research Institute, Hamad Bin Khalifa University, Qatar; Qatar Computing Research Institute, Hamad Bin Khalifa University, Qatar", + "aff_domain": "hbku.edu.qa;dal.ca;hbku.edu.qa;hbku.edu.qa", + "email": "hbku.edu.qa;dal.ca;hbku.edu.qa;hbku.edu.qa", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Hamad Bin Khalifa University;Dalhousie University", + "aff_unique_dep": "Qatar Computing Research Institute;Faculty of Computer Science", + "aff_unique_url": "https://www.qcri.org;https://www.dal.ca", + "aff_unique_abbr": "HBKU;Dal", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Qatar;Canada" + }, + { + "id": "2022.emnlp-main.496", + "title": "One size does not fit all: Investigating strategies for differentially-private learning across NLP tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Preserving privacy in contemporary NLP models allows us to work with sensitive data, but unfortunately comes at a price. We know that stricter privacy guarantees in differentially-private stochastic gradient descent (DP-SGD) generally degrade model performance. However, previous research on the efficiency of DP-SGD in NLP is inconclusive or even counter-intuitive. 
In this short paper, we provide an extensive analysis of different privacy preserving strategies on seven downstream datasets in five different \u2018typical\u2019 NLP tasks with varying complexity using modern neural models based on BERT and XtremeDistil architectures. We show that unlike standard non-private approaches to solving NLP tasks, where bigger is usually better, privacy-preserving strategies do not exhibit a winning pattern, and each task and privacy regime requires a special treatment to achieve adequate performance.", + "author": "Manuel Senge; Timour Igamberdiev; Ivan Habernal", + "authorids": "/m/manuel-senge/; /t/timour-igamberdiev/; /i/ivan-habernal/", + "bibtex": "@inproceedings{senge-etal-2022-one,\n title = \"One size does not fit all: Investigating strategies for differentially-private learning across {NLP} tasks\",\n author = \"Senge, Manuel and\n Igamberdiev, Timour and\n Habernal, Ivan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.496/\",\n doi = \"10.18653/v1/2022.emnlp-main.496\",\n pages = \"7340--7353\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.496.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.496/", + "pdf_size": 589483, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11586928797159385757&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Trustworthy Human Language Technologies, Department of Computer Science, Technical University of Darmstadt; Trustworthy Human Language Technologies, Department of Computer Science, Technical University of Darmstadt; Trustworthy Human Language Technologies, Department of Computer Science, Technical 
University of Darmstadt", + "aff_domain": "web.de;tu-darmstadt.de;tu-darmstadt.de", + "email": "web.de;tu-darmstadt.de;tu-darmstadt.de", + "github": "https://github.com/trusthlt/dp-across-nlp-tasks7340", + "project": "www.trusthlt.org", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Technical University of Darmstadt", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.tu-darmstadt.de", + "aff_unique_abbr": "TUD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.461", + "title": "Open Relation and Event Type Discovery with Type Abstraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conventional \u201cclosed-world\u201d information extraction (IE) approaches rely on human ontologies to define the scope for extraction. As a result, such approaches fall short when applied to new domains. This calls for systems that can automatically infer new types from given corpora, a task which we refer to as type discovery.To tackle this problem, we introduce the idea of type abstraction, where the model is prompted to generalize and name the type. Then we use the similarity between inferred names to induce clusters. Observing that this abstraction-based representation is often complementary to the entity/trigger token representation, we set up these two representations as two views and design our model as a co-training framework. 
Our experiments on multiple relation extraction and event extraction datasets consistently show the advantage of our type abstraction approach.", + "author": "Sha Li; Heng Ji; Jiawei Han", + "authorids": "/s/sha-li/; /h/heng-ji/; /j/jiawei-han/", + "bibtex": "@inproceedings{li-etal-2022-open,\n title = \"Open Relation and Event Type Discovery with Type Abstraction\",\n author = \"Li, Sha and\n Ji, Heng and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.461/\",\n doi = \"10.18653/v1/2022.emnlp-main.461\",\n pages = \"6864--6877\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.461.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.461/", + "pdf_size": 525436, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6122893225947699840&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.295", + "title": "Open World Classification with Adaptive Negative Samples", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "Open world classification is a task in natural language processing with key practical relevance and impact.Since the open or unknown category data only manifests in the inference phase, finding a model with a suitable decision boundary accommodating for the identification of known classes and discrimination of the open category is challenging.The performance of existing models is limited by the lack of effective open category data during the training stage or the lack of a good mechanism to learn appropriate decision boundaries.We propose an approach based on Adaptive Negative Samples (ANS) designed to generate effective synthetic open category samples in the training stage and without requiring any prior knowledge or external datasets.Empirically, we find a significant advantage in using auxiliary one-versus-rest binary classifiers, which effectively utilize the generated negative samples and avoid the complex threshold-seeking stage in previous works.Extensive experiments on three benchmark datasets show that ANS achieves significant improvements over state-of-the-art methods.", + "author": "Ke Bai; Guoyin Wang; Jiwei Li; Sunghyun Park; Sungjin Lee; Puyang Xu; Ricardo Henao; Lawrence Carin", + "authorids": "/k/ke-bai/; /g/guoyin-wang/; /j/jiwei-li/; /s/sunghyun-park/; /s/sungjin-lee/; /p/puyang-xu/; /r/ricardo-henao/; /l/lawrence-carin/", + "bibtex": "@inproceedings{bai-etal-2022-open,\n title = \"Open World Classification with Adaptive Negative Samples\",\n author = \"Bai, Ke and\n Wang, Guoyin and\n Li, Jiwei and\n Park, Sunghyun and\n Lee, Sungjin and\n Xu, Puyang and\n Henao, Ricardo and\n Carin, Lawrence\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n 
url = \"https://aclanthology.org/2022.emnlp-main.295/\",\n doi = \"10.18653/v1/2022.emnlp-main.295\",\n pages = \"4378--4392\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.295.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.295/", + "pdf_size": 1689784, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2155440046909220827&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "Duke University; Amazon; Zhejiang University; Amazon; Amazon; Amazon; Duke University; KAUST", + "aff_domain": "duke.edu;duke.edu;amazon.com;amazon.com;amazon.com;amazon.com;zju.edu.cn;kaust.edu.sa", + "email": "duke.edu;duke.edu;amazon.com;amazon.com;amazon.com;amazon.com;zju.edu.cn;kaust.edu.sa", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;1;1;1;0;3", + "aff_unique_norm": "Duke University;Amazon.com, Inc.;Zhejiang University;King Abdullah University of Science and Technology", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.duke.edu;https://www.amazon.com;https://www.zju.edu.cn;https://www.kaust.edu.sa", + "aff_unique_abbr": "Duke;Amazon;ZJU;KAUST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0;0;0;2", + "aff_country_unique": "United States;China;Saudi Arabia" + }, + { + "id": "2022.emnlp-main.427", + "title": "Open-Domain Sign Language Translation Learned from Online Video", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing work on sign language translation \u2013 that is, translation from sign language videos into sentences in a written language \u2013 has focused mainly on (1) data collected in a controlled environment or (2) data in a specific domain, which limits the applicability to real-world settings. 
In this paper, we introduce OpenASL, a large-scale American Sign Language (ASL) - English dataset collected from online video sites (e.g., YouTube).OpenASL contains 288 hours of ASL videos in multiple domains from over 200 signers and is the largest publicly available ASL translation dataset to date. To tackle the challenges of sign language translation in realistic settings and without glosses, we propose a set of techniques including sign search as a pretext task for pre-training and fusion of mouthing and handshape features. The proposed techniques produce consistent and large improvements in translation quality, over baseline models based on prior work.", + "author": "Bowen Shi; Diane Brentari; Gregory Shakhnarovich; Karen Livescu", + "authorids": "/b/bowen-shi/; /d/diane-brentari/; /g/gregory-shakhnarovich/; /k/karen-livescu/", + "bibtex": "@inproceedings{shi-etal-2022-open,\n title = \"Open-Domain Sign Language Translation Learned from Online Video\",\n author = \"Shi, Bowen and\n Brentari, Diane and\n Shakhnarovich, Gregory and\n Livescu, Karen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.427/\",\n doi = \"10.18653/v1/2022.emnlp-main.427\",\n pages = \"6365--6379\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.427.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.427/", + "pdf_size": 3310832, + "gs_citation": 61, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=86435642426217255&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "TTI-Chicago; University of Chicago; TTI-Chicago; TTI-Chicago", + "aff_domain": "ttic.edu;uchicago.edu;ttic.edu;ttic.edu", + "email": 
"ttic.edu;uchicago.edu;ttic.edu;ttic.edu", + "github": "https://github.com/chevalierNoir/OpenASL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Toyota Technological Institute at Chicago;University of Chicago", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tti-chicago.org;https://www.uchicago.edu", + "aff_unique_abbr": "TTI;UChicago", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.188", + "title": "Open-Topic False Information Detection on Social Networks with Contrastive Adversarial Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Current works about false information detection based on conversation graphs on social networks focus primarily on two research streams from the standpoint of topic distribution: in-topic and cross-topic techniques, which assume that the data topic distribution is identical or cross, respectively. This signifies that all test data topics are seen or unseen by the model.However, these assumptions are too harsh for actual social networks that contain both seen and unseen topics simultaneously, hence restricting their practical application.In light of this, this paper develops a novel open-topic scenario that is better suited to actual social networks. In this open-topic scenario, we empirically find that the existing models suffer from impairment in the detection performance for seen or unseen topic data, resulting in poor overall model performance. 
To address this issue, we propose a novel Contrastive Adversarial Learning Network, CALN, that employs an unsupervised topic clustering method to capture topic-specific features to enhance the model\u2019s performance for seen topics and an unsupervised adversarial learning method to align data representation distributions to enhance the model\u2019s generalisation to unseen topics.Experiments on two benchmark datasets and a variety of graph neural networks demonstrate the effectiveness of our approach.", + "author": "Guanghui Ma; Chunming Hu; Ling Ge; Hong Zhang", + "authorids": "/g/guanghui-ma/; /c/chunming-hu/; /l/ling-ge/; /h/hong-zhang/", + "bibtex": "@inproceedings{ma-etal-2022-open-topic,\n title = \"Open-Topic False Information Detection on Social Networks with Contrastive Adversarial Learning\",\n author = \"Ma, Guanghui and\n Hu, Chunming and\n Ge, Ling and\n Zhang, Hong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.188/\",\n doi = \"10.18653/v1/2022.emnlp-main.188\",\n pages = \"2911--2923\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.188.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.188/", + "pdf_size": 9336997, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18405787734037994928&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "School of Computer Science and Engineering, Beihang University, Beijing, China + DBDC, Beihang University, Beijing, China; College of Software, Beihang University, Beijing, China + DBDC, Beihang University, Beijing, China; School of Computer Science and Engineering, Beihang University, Beijing, China + DBDC, Beihang University, 
Beijing, China; CNCERT/CC, Beijing, China", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;isc.org.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;isc.org.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;1", + "aff_unique_norm": "Beihang University;China National Cyber Emergency Response Team/Coordination Center", + "aff_unique_dep": "School of Computer Science and Engineering;", + "aff_unique_url": "http://www.buaa.edu.cn;http://www.cncert.org.cn", + "aff_unique_abbr": "BUAA;CNCERT/CC", + "aff_campus_unique_index": "0+0;0+0;0+0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0+0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.395", + "title": "Open-Vocabulary Argument Role Prediction For Event Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The argument role in event extraction refers to the relation between an event and an argument participating in it. Despite the great progress in event extraction, existing studies still depend on roles pre-defined by domain experts. These studies expose obvious weakness when extending to emerging event types or new domains without available roles. Therefore, more attention and effort needs to be devoted to automatically customizing argument roles. In this paper, we define this essential but under-explored task: open-vocabulary argument role prediction. The goal of this task is to infer a set of argument roles for a given event type. We propose a novel unsupervised framework, RolePred for this task. Specifically, we formulate the role prediction problem as an in-filling task and construct prompts for a pre-trained language model to generate candidate roles. By extracting and analyzing the candidate arguments, the event-specific roles are further merged and selected. 
To standardize the research of this task, we collect a new human-annotated event extraction dataset including 143 customized argument roles with rich semantics. On this dataset, RolePred outperforms the existing methods by a large margin.", + "author": "Yizhu Jiao; Sha Li; Yiqing Xie; Ming Zhong; Heng Ji; Jiawei Han", + "authorids": "/y/yizhu-jiao/; /s/sha-li/; /y/yiqing-xie/; /m/ming-zhong/; /h/heng-ji/; /j/jiawei-han/", + "bibtex": "@inproceedings{jiao-etal-2022-open,\n title = \"Open-Vocabulary Argument Role Prediction For Event Extraction\",\n author = \"Jiao, Yizhu and\n Li, Sha and\n Xie, Yiqing and\n Zhong, Ming and\n Ji, Heng and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.395/\",\n doi = \"10.18653/v1/2022.findings-emnlp.395\",\n pages = \"5404--5418\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.395.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.395/", + "pdf_size": 1253872, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16153033348401768039&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign, IL, USA; University of Illinois at Urbana-Champaign, IL, USA; University of Illinois at Urbana-Champaign, IL, USA; University of Illinois at Urbana-Champaign, IL, USA; University of Illinois at Urbana-Champaign, IL, USA; University of Illinois at Urbana-Champaign, IL, USA", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu;illinois.edu", + "github": "https://github.com/yzjiao/RolePred", + 
"project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign", + "aff_unique_dep": "", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.392", + "title": "Open-domain Question Answering via Chain of Reasoning over Heterogeneous Knowledge", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose a novel open-domain question answering (ODQA) framework for answering single/multi-hop questions across heterogeneous knowledge sources.The key novelty of our method is the introduction of the intermediary modules into the current retriever-reader pipeline.Unlike previous methods that solely rely on the retriever for gathering all evidence in isolation,our intermediary performs a chain of reasoning over the retrieved set.Specifically, our method links the retrieved evidence with its related global context into graphs and organizes them into a candidate list of evidence chains.Built upon pretrained language models, our system achieves competitive performance on two ODQA datasets, OTT-QA and NQ, against tables and passages from Wikipedia.In particular, our model substantially outperforms the previous state-of-the-art on OTT-QA with an exact match score of 47.3 (45% relative gain).", + "author": "Kaixin Ma; Hao Cheng; Xiaodong Liu; Eric Nyberg; Jianfeng Gao", + "authorids": "/k/kaixin-ma/; /h/hao-cheng/; /x/xiaodong-liu/; /e/eric-nyberg/; /j/jianfeng-gao/", + "bibtex": "@inproceedings{ma-etal-2022-open-domain,\n title = \"Open-domain Question Answering via Chain of Reasoning over Heterogeneous Knowledge\",\n author = \"Ma, Kaixin and\n Cheng, Hao and\n Liu, Xiaodong and\n Nyberg, Eric and\n Gao, Jianfeng\",\n editor = \"Goldberg, Yoav and\n 
Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.392/\",\n doi = \"10.18653/v1/2022.findings-emnlp.392\",\n pages = \"5360--5374\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.392.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.392/", + "pdf_size": 599678, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10445779985557489185&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Carnegie Mellon University\u2660Microsoft Research; Microsoft Research; Microsoft Research; Carnegie Mellon University\u2660Microsoft Research; Microsoft Research", + "aff_domain": "cs.cmu.edu;microsoft.com;microsoft.com;cs.cmu.edu;microsoft.com", + "email": "cs.cmu.edu;microsoft.com;microsoft.com;cs.cmu.edu;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;1", + "aff_unique_norm": "Carnegie Mellon University;Microsoft Corporation", + "aff_unique_dep": ";Microsoft Research", + "aff_unique_url": "https://www.cmu.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "CMU;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.495", + "title": "Open-domain Video Commentary Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Live commentary plays an important role in sports broadcasts and video games, making spectators more excited and immersed. 
In this context, though approaches for automatically generating such commentary have been proposed in the past, they have been generally concerned with specific fields, where it is possible to leverage domain-specific information. In light of this, we propose the task of generating video commentary in an open-domain fashion. We detail the construction of a new large-scale dataset of transcribed commentary aligned with videos containing various human actions in a variety of domains, and propose approaches based on well-known neural architectures to tackle the task. To understand the strengths and limitations of current approaches, we present an in-depth empirical study based on our data. Our results suggest clear trade-offs between textual and visual inputs for the models and highlight the importance of relying on external knowledge in this open-domain setting, resulting in a set of robust baselines for our task.", + "author": "Edison Marrese-Taylor; Yumi Hamazono; Tatsuya Ishigaki; Goran Topi\u0107; Yusuke Miyao; Ichiro Kobayashi; Hiroya Takamura", + "authorids": "/e/edison-marrese-taylor/; /y/yumi-hamazono/; /t/tatsuya-ishigaki/; /g/goran-topic/; /y/yusuke-miyao/; /i/ichiro-kobayashi/; /h/hiroya-takamura/", + "bibtex": "@inproceedings{marrese-taylor-etal-2022-open,\n title = \"Open-domain Video Commentary Generation\",\n author = \"Marrese-Taylor, Edison and\n Hamazono, Yumi and\n Ishigaki, Tatsuya and\n Topi{\\'c}, Goran and\n Miyao, Yusuke and\n Kobayashi, Ichiro and\n Takamura, Hiroya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.495/\",\n doi = \"10.18653/v1/2022.emnlp-main.495\",\n pages = \"7326--7339\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.495.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.495/", + "pdf_size": 2462193, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14869930672953874967&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "National Institute of Advanced Industrial Science and Technology; National Institute of Advanced Industrial Science and Technology + Ochanomizu University; National Institute of Advanced Industrial Science and Technology; National Institute of Advanced Industrial Science and Technology; National Institute of Advanced Industrial Science and Technology + The University of Tokyo; National Institute of Advanced Industrial Science and Technology + Ochanomizu University; National Institute of Advanced Industrial Science and Technology", + "aff_domain": "aist.go.jp;is.ocha.ac.jp;aist.go.jp;aist.go.jp;is.s.u-tokyo.ac.jp;is.ocha.ac.jp;aist.go.jp", + "email": "aist.go.jp;is.ocha.ac.jp;aist.go.jp;aist.go.jp;is.s.u-tokyo.ac.jp;is.ocha.ac.jp;aist.go.jp", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0+1;0;0;0+2;0+1;0", + "aff_unique_norm": "National Institute of Advanced Industrial Science and Technology;Ochanomizu University;University of Tokyo", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.aist.go.jp;https://www.ochanomizu-u.ac.jp;https://www.u-tokyo.ac.jp", + "aff_unique_abbr": "AIST;Ochanomizu U;UTokyo", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0+0;0+0;0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.emnlp-main.254", + "title": "Open-ended Knowledge Tracing for Computer Science Education", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In educational applications, knowledge tracing refers to the problem of estimating students\u2019 time-varying concept/skill mastery level from their past responses to questions and predicting their 
future performance.One key limitation of most existing knowledge tracing methods is that they treat student responses to questions as binary-valued, i.e., whether they are correct or incorrect. Response correctness analysis/prediction is straightforward, but it ignores important information regarding mastery, especially for open-ended questions.In contrast, exact student responses can provide much more information.In this paper, we conduct the first exploration into open-ended knowledge tracing (OKT) by studying the new task of predicting students\u2019 exact open-ended responses to questions.Our work is grounded in the domain of computer science education with programming questions. We develop an initial solution to the OKT problem, a student knowledge-guided code generation approach, that combines program synthesis methods using language models with student knowledge tracing methods. We also conduct a series of quantitative and qualitative experiments on a real-world student code dataset to validate and demonstrate the promise of OKT.", + "author": "Naiming Liu; Zichao Wang; Richard Baraniuk; Andrew Lan", + "authorids": "/n/naiming-liu/; /z/zichao-wang/; /r/richard-baraniuk/; /a/andrew-lan/", + "bibtex": "@inproceedings{liu-etal-2022-open,\n title = \"Open-ended Knowledge Tracing for Computer Science Education\",\n author = \"Liu, Naiming and\n Wang, Zichao and\n Baraniuk, Richard and\n Lan, Andrew\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.254/\",\n doi = \"10.18653/v1/2022.emnlp-main.254\",\n pages = \"3849--3862\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.254.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.254/", + "pdf_size": 5804825, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15451628974816653199&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 4, + "aff": "Rice University; Rice University; Rice University; University of Massachusetts Amherst", + "aff_domain": "rice.edu;rice.edu;rice.edu;cs.umass.edu", + "email": "rice.edu;rice.edu;rice.edu;cs.umass.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Rice University;University of Massachusetts Amherst", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.rice.edu;https://www.umass.edu", + "aff_unique_abbr": "Rice;UMass Amherst", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Amherst", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.811", + "title": "OpenCQA: Open-ended Question Answering with Charts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Charts are very popular to analyze data and convey important insights. People often analyze visualizations to answer open-ended questions that require explanatory answers. Answering such questions are often difficult and time-consuming as it requires a lot of cognitive and perceptual efforts. To address this challenge, we introduce a new task called OpenCQA, where the goal is to answer an open-ended question about a chart with descriptive texts. We present the annotation process and an in-depth analysis of our dataset. We implement and evaluate a set of baselines under three practical settings. In the first setting, a chart and the accompanying article is provided as input to the model. The second setting provides only the relevant paragraph(s) to the chart instead of the entire article, whereas the third setting requires the model to generate an answer solely based on the chart. 
Our analysis of the results show that the top performing models generally produce fluent and coherent text while they struggle to perform complex logical and arithmetic reasoning.", + "author": "Shankar Kantharaj; Xuan Long Do; Rixie Tiffany Leong; Jia Qing Tan; Enamul Hoque; Shafiq Joty", + "authorids": "/s/shankar-kantharaj/; /x/xuan-long-do/; /r/rixie-tiffany-leong/; /j/jia-qing-tan/; /e/enamul-hoque/; /s/shafiq-joty/", + "bibtex": "@inproceedings{kantharaj-etal-2022-opencqa,\n title = \"{O}pen{CQA}: Open-ended Question Answering with Charts\",\n author = \"Kantharaj, Shankar and\n Do, Xuan Long and\n Leong, Rixie Tiffany and\n Tan, Jia Qing and\n Hoque, Enamul and\n Joty, Shafiq\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.811/\",\n doi = \"10.18653/v1/2022.emnlp-main.811\",\n pages = \"11817--11837\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.811.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.811/", + "pdf_size": 1207896, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7820061529813366840&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.335", + "title": "Opening up Minds with Argumentative Dialogues", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent research on argumentative dialogues has focused on persuading people to take some action, changing their stance on the topic of discussion, or winning debates. 
In this work, we focus on argumentative dialogues that aim to open up (rather than change) people\u2019s minds to help them become more understanding to views that are unfamiliar or in opposition to their own convictions. To this end, we present a dataset of 183 argumentative dialogues about 3 controversial topics: veganism, Brexit and COVID-19 vaccination. The dialogues were collected using the Wizard of Oz approach, where wizards leverage a knowledge-base of arguments to converse with participants. Open-mindedness is measured before and after engaging in the dialogue using a questionnaire from the psychology literature, and success of the dialogue is measured as the change in the participant\u2019s stance towards those who hold opinions different to theirs. We evaluate two dialogue models: a Wikipedia-based and an argument-based model. We show that while both models perform closely in terms of opening up minds, the argument-based model is significantly better on other dialogue properties such as engagement and clarity.", + "author": "Youmna Farag; Charlotte Brand; Jacopo Amidei; Paul Piwek; Tom Stafford; Svetlana Stoyanchev; Andreas Vlachos", + "authorids": "/y/youmna-farag/; /c/charlotte-brand/; /j/jacopo-amidei/; /p/paul-piwek/; /t/tom-stafford/; /s/svetlana-stoyanchev/; /a/andreas-vlachos/", + "bibtex": "@inproceedings{farag-etal-2022-opening,\n title = \"Opening up Minds with Argumentative Dialogues\",\n author = \"Farag, Youmna and\n Brand, Charlotte and\n Amidei, Jacopo and\n Piwek, Paul and\n Stafford, Tom and\n Stoyanchev, Svetlana and\n Vlachos, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.335/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.335\",\n pages = \"4569--4582\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.335.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.335/", + "pdf_size": 355386, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4882856560892851868&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.201", + "title": "Opinion Summarization by Weak-Supervision from Mix-structured Data", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Opinion summarization of multiple reviews suffers from the lack of reference summaries for training.Most previous approaches construct multiple reviews and their summary based on textual similarities between reviews,resulting in information mismatch between the review input and the summary. In this paper, we convert each review into a mixof structured and unstructured data, which we call opinion-aspect pairs (OAs) and implicit sentences (ISs).We propose a new method to synthesize training pairs of such mix-structured data as input and the textual summary as output,and design a summarization model with OA encoder and IS encoder.Experiments show that our approach outperforms previous methods on Yelp, Amazon and RottenTomatos datasets.", + "author": "Yizhu Liu; Qi Jia; Kenny Zhu", + "authorids": "/y/yizhu-liu/; /q/qi-jia/; /k/kenny-zhu/", + "bibtex": "@inproceedings{liu-etal-2022-opinion,\n title = \"Opinion Summarization by Weak-Supervision from Mix-structured Data\",\n author = \"Liu, Yizhu and\n Jia, Qi and\n Zhu, Kenny\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n 
publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.201/\",\n doi = \"10.18653/v1/2022.emnlp-main.201\",\n pages = \"3086--3096\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.201.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.201/", + "pdf_size": 553405, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4365595432043820248&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.93", + "title": "Outlier Dimensions that Disrupt Transformers are Driven by Frequency", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While Transformer-based language models are generally very robust to pruning, there is the recently discovered outlier phenomenon: disabling only 48 out of 110M parameters in BERT-base drops its performance by nearly 30% on MNLI. We replicate the original evidence for the outlier phenomenon and we link it to the geometry of the embedding space. We find that in both BERT and RoBERTa the magnitude of hidden state coefficients corresponding to outlier dimensions correlate with the frequencies of encoded tokens in pre-training data, and they also contribute to the \u201cvertical\u201d self-attention pattern enabling the model to focus on the special tokens. 
This explains the drop in performance from disabling the outliers, and it suggests that to decrease anisotopicity in future models we need pre-training schemas that would better take into account the skewed token distributions.", + "author": "Giovanni Puccetti; Anna Rogers; Aleksandr Drozd; Felice Dell\u2019Orletta", + "authorids": "/g/giovanni-puccetti/; /a/anna-rogers/; /a/aleksandr-drozd/; /f/felice-dellorletta/", + "bibtex": "@inproceedings{puccetti-etal-2022-outlier,\n title = \"Outlier Dimensions that Disrupt Transformers are Driven by Frequency\",\n author = \"Puccetti, Giovanni and\n Rogers, Anna and\n Drozd, Aleksandr and\n Dell{'}Orletta, Felice\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.93/\",\n doi = \"10.18653/v1/2022.findings-emnlp.93\",\n pages = \"1286--1304\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.93.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.93/", + "pdf_size": 2059745, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7589648502816127160&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.630", + "title": "Overcoming Catastrophic Forgetting in Zero-Shot Cross-Lingual Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we explore the challenging problem of performing a generative task in a target language when labeled data is only available in English, using summarization as a case study. 
We assume a strict setting with no access to parallel data or machine translation and find that common transfer learning approaches struggle in this setting, as a generative multilingual model fine-tuned purely on English catastrophically forgets how to generate non-English. Given the recent rise of parameter-efficient adaptation techniques, we conduct the first investigation into how one such method, prompt tuning (Lester et al., 2021), can overcome catastrophic forgetting to enable zero-shot cross-lingual generation. Our experiments show that parameter-efficient prompt tuning provides gains over standard fine-tuning when transferring between less-related languages, e.g., from English to Thai. However, a significant gap still remains between these methods and fully-supervised baselines. To improve cross-lingual transfer further, we explore several approaches, including: (1) mixing in unlabeled multilingual data, and (2) explicitly factoring prompts into recombinable language and task components. 
Our approaches can provide further quality gains, suggesting that robust zero-shot cross-lingual generation is within reach.", + "author": "Tu Vu; Aditya Barua; Brian Lester; Daniel Cer; Mohit Iyyer; Noah Constant", + "authorids": "/t/tu-vu/; /a/aditya-barua/; /b/brian-lester/; /d/daniel-cer/; /m/mohit-iyyer/; /n/noah-constant/", + "bibtex": "@inproceedings{vu-etal-2022-overcoming,\n title = \"Overcoming Catastrophic Forgetting in Zero-Shot Cross-Lingual Generation\",\n author = \"Vu, Tu and\n Barua, Aditya and\n Lester, Brian and\n Cer, Daniel and\n Iyyer, Mohit and\n Constant, Noah\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.630/\",\n doi = \"10.18653/v1/2022.emnlp-main.630\",\n pages = \"9279--9300\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.630.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.630/", + "pdf_size": 1331958, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1553436059437954152&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Google Research1 + University of Massachusetts Amherst2; Google Research1; Google Research1; Google Research1; University of Massachusetts Amherst2; Google Research1", + "aff_domain": "google.com;google.com;google.com;google.com;cs.umass.edu;google.com", + "email": "google.com;google.com;google.com;google.com;cs.umass.edu;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0;0;0;1;0", + "aff_unique_norm": "Google;University of Massachusetts Amherst", + "aff_unique_dep": "Google Research;", + "aff_unique_url": "https://research.google;https://www.umass.edu", + "aff_unique_abbr": "Google 
Research;UMass Amherst", + "aff_campus_unique_index": "0+1;0;0;0;1;0", + "aff_campus_unique": "Mountain View;Amherst", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.496", + "title": "P3LM: Probabilistically Permuted Prophet Language Modeling for Generative Pre-Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Conventional autoregressive left-to-right (L2R) sequence generation faces two issues during decoding: limited to unidirectional target sequence modeling, and constrained on strong local dependencies.To address the aforementioned problem, we propose P3LM, a probabilistically permuted prophet language model, which strengthens the modeling of bidirectional information and long token dependencies for sequence generation.Specifically, P3LM learns to generate tokens in permuted order upon an order-aware transformer decoder, as well as to generate the corresponding future N tokens with a multi-stream attention mechanism.Extensive experiments are conducted on the GLGE benchmark, which includes four datasets for summarization, two for question generation, one for conversational question answering, and one for dialog response generation, where P3LM achieves state-of-the-art results compared with strong publicly available generative pre-training methods.", + "author": "Junwei Bao; Yifan Wang; Ying Jiangyong; Yeyun Gong; Jing Zhao; Youzheng Wu; Xiaodong He", + "authorids": "/j/junwei-bao/; /y/yifan-wang/; /y/ying-jiangyong/; /y/yeyun-gong/; /j/jing-zhao/; /y/youzheng-wu/; /x/xiaodong-he/", + "bibtex": "@inproceedings{bao-etal-2022-p3lm,\n title = \"{P}3{LM}: Probabilistically Permuted Prophet Language Modeling for Generative Pre-Training\",\n author = \"Bao, Junwei and\n Wang, Yifan and\n Jiangyong, Ying and\n Gong, Yeyun and\n Zhao, Jing and\n Wu, Youzheng and\n He, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n 
booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.496/\",\n doi = \"10.18653/v1/2022.findings-emnlp.496\",\n pages = \"6663--6675\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.496.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.496/", + "pdf_size": 1048029, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:CYk208hhNpQJ:scholar.google.com/&scioq=P3LM:+Probabilistically+Permuted+Prophet+Language+Modeling+for+Generative+Pre-Training&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff": "JD AI Research; JD AI Research; Huawei Technologies; Microsoft Research Asia; JD AI Research; JD AI Research; JD AI Research", + "aff_domain": "jd.com; ;huawei.com;microsoft.com; ; ;gmail.com", + "email": "jd.com; ;huawei.com;microsoft.com; ; ;gmail.com", + "github": "https://github.com/JunweiBao/P3LM", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;2;0;0;0", + "aff_unique_norm": "JD AI Research;Huawei Technologies;Microsoft Research", + "aff_unique_dep": ";;Research", + "aff_unique_url": "https://www.jd.com;https://www.huawei.com;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "JD AI;Huawei;MSR Asia", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.469", + "title": "PACIFIC: Towards Proactive Conversational Question Answering over Tabular and Textual Data in Finance", + "track": "main", + "status": "Main", + "award": false, + "abstract": "To facilitate conversational question answering (CQA) over hybrid contexts in finance, we present a new dataset, named PACIFIC. 
Compared with existing CQA datasets, PACIFIC exhibits three key features: (i) proactivity, (ii) numerical reasoning, and (iii) hybrid context of tables and text. A new task is defined accordingly to study Proactive Conversational Question Answering (PCQA), which combines clarification question generation and CQA. In addition, we propose a novel method, namely UniPCQA, to adapt a hybrid format of input and output content in PCQA into the Seq2Seq problem, including the reformulation of the numerical reasoning process as code generation. UniPCQA performs multi-task learning over all sub-tasks in PCQA and incorporates a simple ensemble strategy to alleviate the error propagation issue in the multi-task learning by cross-validating top-k sampled Seq2Seq outputs. We benchmark the PACIFIC dataset with extensive baselines and provide comprehensive evaluations on each sub-task of PCQA.", + "author": "Yang Deng; Wenqiang Lei; Wenxuan Zhang; Wai Lam; Tat-Seng Chua", + "authorids": "/y/yang-deng/; /w/wenqiang-lei/; /w/wenxuan-zhang/; /w/wai-lam/; /t/tat-seng-chua/", + "bibtex": "@inproceedings{deng-etal-2022-pacific,\n title = \"{PACIFIC}: Towards Proactive Conversational Question Answering over Tabular and Textual Data in Finance\",\n author = \"Deng, Yang and\n Lei, Wenqiang and\n Zhang, Wenxuan and\n Lam, Wai and\n Chua, Tat-Seng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.469/\",\n doi = \"10.18653/v1/2022.emnlp-main.469\",\n pages = \"6970--6984\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.469.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.469/", + "pdf_size": 1444453, + "gs_citation": 52, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7678113307589816166&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-industry.40", + "title": "PAIGE: Personalized Adaptive Interactions Graph Encoder for Query Rewriting in Dialogue Systems", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Unexpected responses or repeated clarification questions from conversational agents detract from the users\u2019 experience with technology meant to streamline their daily tasks. To reduce these frictions, Query Rewriting (QR) techniques replace transcripts of faulty queries with alternatives that lead to responses thatsatisfy the users\u2019 needs. Despite their successes, existing QR approaches are limited in their ability to fix queries that require considering users\u2019 personal preferences. We improve QR by proposing Personalized Adaptive Interactions Graph Encoder (PAIGE).PAIGE is the first QR architecture that jointly models user\u2019s affinities and query semantics end-to-end. The core idea is to represent previous user-agent interactions and world knowledge in a structured form \u2014 a heterogeneous graph \u2014 and apply message passing to propagate latent representations of users\u2019 affinities to refine utterance embeddings.Using these embeddings, PAIGE can potentially provide different rewrites given the same query for users with different preferences. 
Our model, trained without any human-annotated data, improves the rewrite retrieval precision of state-of-the-art baselines by 12.5\u201317.5% while having nearly ten times fewer parameters.", + "author": "Daniel Bi\u015b; Saurabh Gupta; Jie Hao; Xing Fan; Chenlei Guo", + "authorids": "/d/daniel-bis/; /s/saurabh-gupta/; /j/jie-hao/; /x/xing-fan/; /c/chenlei-guo/", + "bibtex": "@inproceedings{bis-etal-2022-paige,\n title = \"{PAIGE}: Personalized Adaptive Interactions Graph Encoder for Query Rewriting in Dialogue Systems\",\n author = \"Bi{\\'s}, Daniel and\n Gupta, Saurabh and\n Hao, Jie and\n Fan, Xing and\n Guo, Chenlei\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.40/\",\n doi = \"10.18653/v1/2022.emnlp-industry.40\",\n pages = \"398--408\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.40.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.40/", + "pdf_size": 454057, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6246042036927257237&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "Amazon Alexa AI; LinkedIn + Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "amazon.com;gmail.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;gmail.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+0;0;0;0", + "aff_unique_norm": "Amazon;LinkedIn Corporation", + "aff_unique_dep": "Alexa AI;", + "aff_unique_url": "https://www.amazon.com;https://www.linkedin.com", + "aff_unique_abbr": "Amazon;LinkedIn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.11", + "title": "PAIR: Prompt-Aware margIn Ranking for Counselor Reflection Scoring in Motivational Interviewing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Counselor reflection is a core verbal skill used by mental health counselors to express understanding and affirmation of the client\u2019s experience and concerns. In this paper, we propose a system for the analysis of counselor reflections. Specifically, our system takes as input one dialog turn containing a client prompt and a counselor response, and outputs a score indicating the level of reflection in the counselor response. We compile a dataset consisting of different levels of reflective listening skills, and propose the Prompt-Aware margIn Ranking (PAIR) framework that contrasts positive and negative prompt and response pairs using specially designed multi-gap and prompt-aware margin ranking losses. 
Through empirical evaluations and deployment of our system in a real-life educational environment, we show that our analysis model outperforms several baselines on different metrics, and can be used to provide useful feedback to counseling trainees.", + "author": "Do June Min; Ver\u00f3nica P\u00e9rez-Rosas; Kenneth Resnicow; Rada Mihalcea", + "authorids": "/d/do-june-min/; /v/veronica-perez-rosas/; /k/kenneth-resnicow/; /r/rada-mihalcea/", + "bibtex": "@inproceedings{min-etal-2022-pair,\n title = \"{PAIR}: Prompt-Aware marg{I}n Ranking for Counselor Reflection Scoring in Motivational Interviewing\",\n author = \"Min, Do June and\n P{\\'e}rez-Rosas, Ver{\\'o}nica and\n Resnicow, Kenneth and\n Mihalcea, Rada\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.11/\",\n doi = \"10.18653/v1/2022.emnlp-main.11\",\n pages = \"148--158\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.11.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.11/", + "pdf_size": 684834, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10965250018262076879&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Department of Electrical Engineering and Computer Science; Department of Electrical Engineering and Computer Science; School of Public Health; Department of Electrical Engineering and Computer Science", + "aff_domain": "umich.edu;umich.edu;umich.edu;umich.edu", + "email": "umich.edu;umich.edu;umich.edu;umich.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Massachusetts Institute of Technology;School of Public Health", + "aff_unique_dep": 
"Department of Electrical Engineering and Computer Science;Public Health", + "aff_unique_url": "https://web.mit.edu/;", + "aff_unique_abbr": "MIT;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "2022.findings-emnlp.281", + "title": "PALT: Parameter-Lite Transfer of Language Models for Knowledge Graph Completion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper presents a parameter-lite transfer learning approach of pretrained language models (LM) for knowledge graph (KG) completion. Instead of finetuning, which modifies all LM parameters, we only tune a few new parameters while keeping the original LM parameters fixed. We establish this via reformulating KG completion as a \u201cfill-in-the-blank\u201d task, and introducing a parameter-lite encoder on top of the original LMs. We show that, by tuning far fewer parameters than finetuning, LMs transfer non-trivially to most tasks and reach competitiveness with prior state-of-the-art approaches. 
For instance, we outperform the fully finetuning approaches on a KG completion benchmark by tuning only 1% of the parameters.", + "author": "Jianhao Shen; Chenguang Wang; Ye Yuan; Jiawei Han; Heng Ji; Koushik Sen; Ming Zhang; Dawn Song", + "authorids": "/j/jianhao-shen/; /c/chenguang-wang/; /y/ye-yuan/; /j/jiawei-han/; /h/heng-ji/; /k/koushik-sen/; /m/ming-zhang/; /d/dawn-song/", + "bibtex": "@inproceedings{shen-etal-2022-palt,\n title = \"{PALT}: Parameter-Lite Transfer of Language Models for Knowledge Graph Completion\",\n author = \"Shen, Jianhao and\n Wang, Chenguang and\n Yuan, Ye and\n Han, Jiawei and\n Ji, Heng and\n Sen, Koushik and\n Zhang, Ming and\n Song, Dawn\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.281/\",\n doi = \"10.18653/v1/2022.findings-emnlp.281\",\n pages = \"3833--3847\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.281.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.281/", + "pdf_size": 2184931, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10736202934838057400&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Peking University; Washington University in St. 
Louis; University of Illinois at Urbana-Champaign; UC Berkeley; University of Illinois at Urbana-Champaign; UC Berkeley; Peking University; UC Berkeley", + "aff_domain": "pku.edu.cn;wustl.edu;pku.edu.cn;illinois.edu;illinois.edu;berkeley.edu;pku.edu.cn;berkeley.edu", + "email": "pku.edu.cn;wustl.edu;pku.edu.cn;illinois.edu;illinois.edu;berkeley.edu;pku.edu.cn;berkeley.edu", + "github": "https://github.com/yuanyehome/PALT", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;2;3;0;3", + "aff_unique_norm": "Peking University;Washington University in St. Louis;University of Illinois at Urbana-Champaign;University of California, Berkeley", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.pku.edu.cn;https://wustl.edu;https://illinois.edu;https://www.berkeley.edu", + "aff_unique_abbr": "Peking U;WashU;UIUC;UC Berkeley", + "aff_campus_unique_index": "1;2;3;2;3;3", + "aff_campus_unique": ";St. Louis;Urbana-Champaign;Berkeley", + "aff_country_unique_index": "0;1;1;1;1;1;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.824", + "title": "PAR: Political Actor Representation Learning with Social Context and Expert Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Modeling the ideological perspectives of political actors is an essential task in computational political science with applications in many downstream tasks. Existing approaches are generally limited to textual data and voting records, while they neglect the rich social context and valuable expert knowledge for holistic ideological analysis. In this paper, we propose PAR, a Political Actor Representation learning framework that jointly leverages social context and expert knowledge. Specifically, we retrieve and extract factual statements about legislators to leverage social context information. 
We then construct a heterogeneous information network to incorporate social context and use relational graph neural networks to learn legislator representations. Finally, we train PAR with three objectives to align representation learning with expert knowledge, model ideological stance consistency, and simulate the echo chamber phenomenon. Extensive experiments demonstrate that PAR is better at augmenting political text understanding and successfully advances the state-of-the-art in political perspective detection and roll call vote prediction. Further analysis proves that PAR learns representations that reflect the political reality and provide new insights into political behavior.", + "author": "Shangbin Feng; Zhaoxuan Tan; Zilong Chen; Ningnan Wang; Peisheng Yu; Qinghua Zheng; Xiaojun Chang; Minnan Luo", + "authorids": "/s/shangbin-feng/; /z/zhaoxuan-tan/; /z/zilong-chen/; /n/ningnan-wang/; /p/peisheng-yu/; /q/qinghua-zheng/; /x/xiaojun-chang/; /m/minnan-luo/", + "bibtex": "@inproceedings{feng-etal-2022-par,\n title = \"{PAR}: Political Actor Representation Learning with Social Context and Expert Knowledge\",\n author = \"Feng, Shangbin and\n Tan, Zhaoxuan and\n Chen, Zilong and\n Wang, Ningnan and\n Yu, Peisheng and\n Zheng, Qinghua and\n Chang, Xiaojun and\n Luo, Minnan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.824/\",\n doi = \"10.18653/v1/2022.emnlp-main.824\",\n pages = \"12022--12036\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.824.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.824/", + "pdf_size": 3279158, + "gs_citation": 13, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=17557438838446576505&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 5, + "aff": "Xi\u2019an Jiaotong University\u2663; Xi\u2019an Jiaotong University\u2663; Xi\u2019an Jiaotong University\u2663; Xi\u2019an Jiaotong University\u2663; Xi\u2019an Jiaotong University\u2663; Xi\u2019an Jiaotong University\u2663; University of Technology Sydney\u20dd; Xi\u2019an Jiaotong University\u2663", + "aff_domain": "cs.washington.edu; ; ; ; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;1;0", + "aff_unique_norm": "Xi'an Jiaotong University;University of Technology Sydney", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.xjtu.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": "XJTU;UTS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.331", + "title": "PASTA: Table-Operations Aware Fact Verification via Sentence-Table Cloze Pre-training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fact verification has attracted a lot of attention recently, e.g., in journalism, marketing, and policymaking, as misinformation and dis- information can sway one\u2019s opinion and affect one\u2019s actions. While fact-checking is a hard task in general, in many cases, false statements can be easily debunked based on analytics over tables with reliable information. Hence, table- based fact verification has recently emerged as an important and growing research area. Yet, progress has been limited due to the lack of datasets that can be used to pre-train language models (LMs) to be aware of common table operations, such as aggregating a column or comparing tuples. 
To bridge this gap, this paper introduces PASTA for table-based fact verification via pre-training with synthesized sentence\u2013table cloze questions. In particular, we design six types of common sentence\u2013table cloze tasks, including Filter, Aggregation, Superlative, Comparative, Ordinal, and Unique, based on which we synthesize a large corpus consisting of 1.2 million sentence\u2013table pairs from WikiTables. PASTA uses a recent pre-trained LM, DeBERTaV3, and further pre- trains it on our corpus. Our experimental results show that PASTA achieves new state-of-the-art (SOTA) performance on two table-based fact verification datasets TabFact and SEM-TAB- FACTS. In particular, on the complex set of TabFact, which contains multiple operations, PASTA largely outperforms previous SOTA by 4.7% (85.6% vs. 80.9%), and the gap between PASTA and human performance on the small test set is narrowed to just 1.5% (90.6% vs. 92.1%).", + "author": "Zihui Gu; Ju Fan; Nan Tang; Preslav Nakov; Xiaoman Zhao; Xiaoyong Du", + "authorids": "/z/zihui-gu/; /j/ju-fan/; /n/nan-tang/; /p/preslav-nakov/; /x/xiaoman-zhao/; /x/xiaoyong-du/", + "bibtex": "@inproceedings{gu-etal-2022-pasta,\n title = \"{PASTA}: Table-Operations Aware Fact Verification via Sentence-Table Cloze Pre-training\",\n author = \"Gu, Zihui and\n Fan, Ju and\n Tang, Nan and\n Nakov, Preslav and\n Zhao, Xiaoman and\n Du, Xiaoyong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.331/\",\n doi = \"10.18653/v1/2022.emnlp-main.331\",\n pages = \"4971--4983\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.331.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.331/", + "pdf_size": 
2828050, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4567135113677317297&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Renmin University of China; Renmin University of China; Qatar Computing Research Institute, HBKU; Mohamed bin Zayed University of Artificial Intelligence; Renmin University of China; Renmin University of China", + "aff_domain": "ruc.edu.cn;ruc.edu.cn;hbku.edu.qa;mbzuai.ac.ae;ruc.edu.cn;ruc.edu.cn", + "email": "ruc.edu.cn;ruc.edu.cn;hbku.edu.qa;mbzuai.ac.ae;ruc.edu.cn;ruc.edu.cn", + "github": "https://github.com/ruc-datalab/PASTA2017", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;0;0", + "aff_unique_norm": "Renmin University of China;Qatar Computing Research Institute;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ruc.edu.cn;https://www.qcri.org;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "RUC;QCRI;MBZUAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;2;0;0", + "aff_country_unique": "China;Qatar;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.241", + "title": "PATS: Sensitivity-aware Noisy Learning for Pretrained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A wide range of NLP tasks benefit from the fine-tuning of pretrained language models (PLMs). However, a number of redundant parameters which contribute less to the downstream task are observed in a directly fine-tuned model. We consider the gap between pretraining and downstream tasks hinders the training of these redundant parameters, and results in a suboptimal performance of the overall model. In this paper, we present PATS (Perturbation According To Sensitivity), a noisy training mechanism which considers each parameter\u2019s importance in the downstream task to help fine-tune PLMs. 
The main idea of PATS is to add bigger noise to parameters with lower sensitivity and vice versa, in order to activate more parameters\u2019 contributions to downstream tasks without affecting the sensitive ones much. Extensive experiments conducted on different tasks of the GLUE benchmark show PATS can consistently empower the fine-tuning of different sizes of PLMs, and the parameters in the well-performing models always have more concentrated distributions of sensitivities, which experimentally proves the effectiveness of our method.", + "author": "Yupeng Zhang; Hongzhi Zhang; Sirui Wang; Wei Wu; Zhoujun Li", + "authorids": "/y/yupeng-zhang/; /h/hongzhi-zhang/; /s/sirui-wang/; /w/wei-wu/; /z/zhoujun-li/", + "bibtex": "@inproceedings{zhang-etal-2022-pats,\n title = \"{PATS}: Sensitivity-aware Noisy Learning for Pretrained Language Models\",\n author = \"Zhang, Yupeng and\n Zhang, Hongzhi and\n Wang, Sirui and\n Wu, Wei and\n Li, Zhoujun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.241/\",\n doi = \"10.18653/v1/2022.emnlp-main.241\",\n pages = \"3680--3687\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.241.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.241/", + "pdf_size": 349143, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8779652845784862353&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 3, + "aff": "Beihang University, Beijing, China+Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Meituan Inc., Beijing, China; Beihang University, Beijing, China+Meituan Inc., Beijing, China", + "aff_domain": 
"buaa.edu.cn;meituan.com;meituan.com;meituan.com;buaa.edu.cn", + "email": "buaa.edu.cn;meituan.com;meituan.com;meituan.com;buaa.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;1;1;0+1", + "aff_unique_norm": "Beihang University;Meituan Inc.", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.buaa.edu.cn;https://www.meituan.com", + "aff_unique_abbr": "BUAA;Meituan", + "aff_campus_unique_index": "0+0;0;0;0;0+0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0+0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.175", + "title": "PAUQ: Text-to-SQL in Russian", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Semantic parsing is an important task that allows to democratize human-computer interaction. One of the most popular text-to-SQL datasets with complex and diverse natural language (NL) questions and SQL queries is Spider. We construct and complement a Spider dataset for Russian, thus creating the first publicly available text-to-SQL dataset for this language. While examining its components - NL questions, SQL queries and databases content - we identify limitations of the existing database structure, fill out missing values for tables and add new requests for underrepresented categories. We select thirty functional test sets with different features that can be used for the evaluation of neural models\u2019 abilities. To conduct the experiments, we adapt baseline architectures RAT-SQL and BRIDGE and provide in-depth query component analysis. On the target language, both models demonstrate strong results with monolingual training and improved accuracy in multilingual scenario. In this paper, we also study trade-offs between machine-translated and manually-created NL queries. 
At present, Russian text-to-SQL is lacking in datasets as well as trained models, and we view this work as an important step towards filling this gap.", + "author": "Daria Bakshandaeva; Oleg Somov; Ekaterina Dmitrieva; Vera Davydova; Elena Tutubalina", + "authorids": "/d/daria-bakshandaeva/; /o/oleg-somov/; /e/ekaterina-dmitrieva/; /v/vera-davydova/; /e/elena-tutubalina/", + "bibtex": "@inproceedings{bakshandaeva-etal-2022-pauq,\n title = \"{PAUQ}: Text-to-{SQL} in {R}ussian\",\n author = \"Bakshandaeva, Daria and\n Somov, Oleg and\n Dmitrieva, Ekaterina and\n Davydova, Vera and\n Tutubalina, Elena\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.175/\",\n doi = \"10.18653/v1/2022.findings-emnlp.175\",\n pages = \"2355--2376\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.175.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.175/", + "pdf_size": 499863, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11368238926016822676&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": "University of Helsinki; SberDevices+MIPT; HSE University+Sber AI; Sber AI; HSE University+AIRI", + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com", + "github": "https://github.com/ai-spiderweb/pauq2355", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;3+4;4;3+5", + "aff_unique_norm": "University of Helsinki;SberDevices;Moscow Institute of Physics and Technology;Higher School of Economics;Sberbank;Artificial Intelligence Research Institute", + "aff_unique_dep": ";;;;Sber AI;", + "aff_unique_url": 
"https://www.helsinki.fi;https://sberdevices.ru;https://mipt.ru;https://hse.ru;https://www.sberbank.ru/en;https://www.airi.jp", + "aff_unique_abbr": "UH;SberDevices;MIPT;HSE;Sber;AIRI", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+1;1+1;1;1+2", + "aff_country_unique": "Finland;Russia;Japan" + }, + { + "id": "2022.emnlp-main.826", + "title": "PCL: Peer-Contrastive Learning with Diverse Augmentations for Unsupervised Sentence Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Learning sentence embeddings in an unsupervised manner is fundamental in natural language processing. Recent common practice is to couple pre-trained language models with unsupervised contrastive learning, whose success relies on augmenting a sentence with a semantically-close positive instance to construct contrastive pairs. Nonetheless, existing approaches usually depend on a mono-augmenting strategy, which causes learning shortcuts towards the augmenting biases and thus corrupts the quality of sentence embeddings. A straightforward solution is resorting to more diverse positives from a multi-augmenting strategy, while an open question remains about how to unsupervisedly learn from the diverse positives but with uneven augmenting qualities in the text field. As one answer, we propose a novel Peer-Contrastive Learning (PCL) with diverse augmentations. PCL constructs diverse contrastive positives and negatives at the group level for unsupervised sentence embeddings. PCL performs peer-positive contrast as well as peer-network cooperation, which offers an inherent anti-bias ability and an effective way to learn from diverse augmentations. 
Experiments on STS benchmarks verify the effectiveness of PCL against its competitors in unsupervised sentence embeddings.", + "author": "Qiyu Wu; Chongyang Tao; Tao Shen; Can Xu; Xiubo Geng; Daxin Jiang", + "authorids": "/q/qiyu-wu/; /c/chongyang-tao/; /t/tao-shen/; /c/can-xu/; /x/xiubo-geng/; /d/daxin-jiang/", + "bibtex": "@inproceedings{wu-etal-2022-pcl,\n title = \"{PCL}: Peer-Contrastive Learning with Diverse Augmentations for Unsupervised Sentence Embeddings\",\n author = \"Wu, Qiyu and\n Tao, Chongyang and\n Shen, Tao and\n Xu, Can and\n Geng, Xiubo and\n Jiang, Daxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.826/\",\n doi = \"10.18653/v1/2022.emnlp-main.826\",\n pages = \"12052--12066\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.826.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.826/", + "pdf_size": 403778, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10117844803995806606&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "The University of Tokyo, Tokyo, Japan; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation", + "aff_domain": "g.ecc.u-tokyo.ac.jp;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "g.ecc.u-tokyo.ac.jp;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/qiyuw/PeerCL", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "The University of Tokyo;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.u-tokyo.ac.jp;https://www.microsoft.com", + "aff_unique_abbr": "UTokyo;Microsoft", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Tokyo;", + "aff_country_unique_index": "0;1;1;1;1;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.emnlp-industry.7", + "title": "PENTATRON: PErsonalized coNText-Aware Transformer for Retrieval-based cOnversational uNderstanding", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Conversational understanding is an integral part of modern intelligent devices. In a large fraction of the global traffic from customers using smart digital assistants, frictions in dialogues may be attributed to incorrect understanding of the entities in a customer\u2019s query due to factors including ambiguous mentions, mispronunciation, background noise and faulty on-device signal processing. Such errors are compounded by two common deficiencies from intelligent devices namely, (1) the device not being tailored to individual customers, and (2) the device responses being unaware of the context in the conversation session. Viewing this problem via the lens of retrieval-based search engines, we build and evaluate a scalable entity correction system, PENTATRON. The system leverages a parametric transformer-based language model to learn patterns from in-session customer-device interactions coupled with a non-parametric personalized entity index to compute the correct query, which aids downstream components in reasoning about the best response. In addition to establishing baselines and demonstrating the value of personalized and context-aware systems, we use multitasking to learn the domain of the correct entity. We also investigate the utility of language model prompts. 
Through extensive experiments, we show a significant upward movement of the key metric (Exact Match) by up to 500.97% (relative to the baseline).", + "author": "Niranjan Uma Naresh; Ziyan Jiang; Ankit Ankit; Sungjin Lee; Jie Hao; Xing Fan; Chenlei Guo", + "authorids": "/n/niranjan-uma-naresh/; /z/ziyan-jiang/; /a/ankit-ankit/; /s/sungjin-lee/; /j/jie-hao/; /x/xing-fan/; /c/chenlei-guo/", + "bibtex": "@inproceedings{uma-naresh-etal-2022-pentatron,\n title = \"{PENTATRON}: {PE}rsonalized co{NT}ext-Aware Transformer for Retrieval-based c{O}nversational u{N}derstanding\",\n author = \"Uma Naresh, Niranjan and\n Jiang, Ziyan and\n Ankit, Ankit and\n Lee, Sungjin and\n Hao, Jie and\n Fan, Xing and\n Guo, Chenlei\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.7/\",\n doi = \"10.18653/v1/2022.emnlp-industry.7\",\n pages = \"90--98\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.7.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.7/", + "pdf_size": 1545125, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15913001721702930537&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Amazon; Amazon; Amazon; Amazon; Amazon; Amazon; Amazon", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.763", + "title": "PEVL: Position-enhanced Pre-training and Prompt Tuning for Vision-language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Vision-language pre-training (VLP) has shown impressive performance on a wide range of cross-modal tasks, where VLP models without reliance on object detectors are becoming the mainstream due to their superior computation efficiency and competitive performance. However, the removal of object detectors also deprives the capability of VLP models in explicit object modeling, which is essential to various position-sensitive vision-language (VL) tasks, such as referring expression comprehension and visual commonsense reasoning. To address the challenge, we introduce PEVL that enhances the pre-training and prompt tuning of VLP models with explicit object position modeling. Specifically, PEVL reformulates discretized object positions and language in a unified language modeling framework, which facilitates explicit VL alignment during pre-training, and also enables flexible prompt tuning for various downstream tasks. We show that PEVL enables state-of-the-art performance of detector-free VLP models on position-sensitive tasks such as referring expression comprehension and phrase grounding, and also improves the performance on position-insensitive tasks with grounded inputs. 
We make the data and code for this paper publicly available at https://github.com/thunlp/PEVL.", + "author": "Yuan Yao; Qianyu Chen; Ao Zhang; Wei Ji; Zhiyuan Liu; Tat-Seng Chua; Maosong Sun", + "authorids": "/y/yuan-yao/; /q/qianyu-chen/; /a/ao-zhang/; /w/wei-ji/; /z/zhiyuan-liu/; /t/tat-seng-chua/; /m/maosong-sun/", + "bibtex": "@inproceedings{yao-etal-2022-pevl,\n title = \"{PEVL}: Position-enhanced Pre-training and Prompt Tuning for Vision-language Models\",\n author = \"Yao, Yuan and\n Chen, Qianyu and\n Zhang, Ao and\n Ji, Wei and\n Liu, Zhiyuan and\n Chua, Tat-Seng and\n Sun, Maosong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.763/\",\n doi = \"10.18653/v1/2022.emnlp-main.763\",\n pages = \"11104--11117\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.763.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.763/", + "pdf_size": 1005781, + "gs_citation": 51, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6915425535483006286&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Innovation Center of Tsinghua University, Shanghai, China; Dept. of Comp. Sci. 
& Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Innovation Center of Tsinghua University, Shanghai, China; Sea-NExT Joint Lab, Singapore+School of Computing, National University of Singapore, Singapore; Sea-NExT Joint Lab, Singapore+School of Computing, National University of Singapore, Singapore; Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Innovation Center of Tsinghua University, Shanghai, China; Sea-NExT Joint Lab, Singapore+School of Computing, National University of Singapore, Singapore; Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China+Beijing National Research Center for Information Science and Technology+Innovation Center of Tsinghua University, Shanghai, China", + "aff_domain": "163.com;gmail.com; ; ;tsinghua.edu.cn; ;tsinghua.edu.cn", + "email": "163.com;gmail.com; ; ;tsinghua.edu.cn; ;tsinghua.edu.cn", + "github": "https://github.com/thunlp/PEVL", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+0;0+1+0;2+3;2+3;0+1+0;2+3;0+1+0", + "aff_unique_norm": "Tsinghua University;Beijing National Research Center for Information Science and Technology;Sea-NExT Joint Lab;National University of Singapore", + "aff_unique_dep": "Dept. of Comp. Sci. 
& Tech.;;;School of Computing", + "aff_unique_url": "https://www.tsinghua.edu.cn;;;https://www.nus.edu.sg", + "aff_unique_abbr": "THU;;;NUS", + "aff_campus_unique_index": "0+2;0+2;;;0+2;;0+2", + "aff_campus_unique": "Beijing;;Shanghai", + "aff_country_unique_index": "0+0+0;0+0+0;1+1;1+1;0+0+0;1+1;0+0+0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.376", + "title": "PHEE: A Dataset for Pharmacovigilance Event Extraction from Text", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The primary goal of drug safety researchers and regulators is to promptly identify adverse drug reactions. Doing so may in turn prevent or reduce the harm to patients and ultimately improve public health. Evaluating and monitoring drug safety (i.e., pharmacovigilance) involves analyzing an ever growing collection of spontaneous reports from health professionals, physicians, and pharmacists, and information voluntarily submitted by patients. In this scenario, facilitating analysis of such reports via automation has the potential to rapidly identify safety signals. Unfortunately, public resources for developing natural language models for this task are scant. We present PHEE, a novel dataset for pharmacovigilance comprising over 5000 annotated events from medical case reports and biomedical literature, making it the largest such public dataset to date. We describe the hierarchical event schema designed to provide coarse and fine-grained information about patients\u2019 demographics, treatments and (side) effects. 
Along with the discussion of the dataset, we present a thorough experimental evaluation of current state-of-the-art approaches for biomedical event extraction, point out their limitations, and highlight open challenges to foster future research in this area.", + "author": "Zhaoyue Sun; Jiazheng Li; Gabriele Pergola; Byron Wallace; Bino John; Nigel Greene; Joseph Kim; Yulan He", + "authorids": "/z/zhaoyue-sun/; /j/jiazheng-li/; /g/gabriele-pergola/; /b/byron-c-wallace/; /b/bino-john/; /n/nigel-greene/; /j/joseph-kim/; /y/yulan-he/", + "bibtex": "@inproceedings{sun-etal-2022-phee,\n title = \"{PHEE}: A Dataset for Pharmacovigilance Event Extraction from Text\",\n author = \"Sun, Zhaoyue and\n Li, Jiazheng and\n Pergola, Gabriele and\n Wallace, Byron and\n John, Bino and\n Greene, Nigel and\n Kim, Joseph and\n He, Yulan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.376/\",\n doi = \"10.18653/v1/2022.emnlp-main.376\",\n pages = \"5571--5587\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.376.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.376/", + "pdf_size": 444318, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3588384768942636539&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 10, + "aff": "Department of Computer Science, University of Warwick; Department of Computer Science, University of Warwick; Department of Computer Science, University of Warwick; Khoury College of Computer Sciences, Northeastern University; AstraZeneca; AstraZeneca; AstraZeneca; Department of Informatics, King\u2019s College London + The Alan Turing Institute", + "aff_domain": 
"warwick.ac.uk;warwick.ac.uk;warwick.ac.uk;northeastern.edu;astrazeneca.com;astrazeneca.com;astrazeneca.com;kcl.ac.uk", + "email": "warwick.ac.uk;warwick.ac.uk;warwick.ac.uk;northeastern.edu;astrazeneca.com;astrazeneca.com;astrazeneca.com;kcl.ac.uk", + "github": "https://github.com/ZhaoyueSun/PHEE", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2;2;2;3+4", + "aff_unique_norm": "University of Warwick;Northeastern University;AstraZeneca;King\u2019s College London;The Alan Turing Institute", + "aff_unique_dep": "Department of Computer Science;Khoury College of Computer Sciences;;Department of Informatics;", + "aff_unique_url": "https://warwick.ac.uk;https://www.northeastern.edu;https://www.astrazeneca.com;https://www.kcl.ac.uk;https://www.turing.ac.uk", + "aff_unique_abbr": "Warwick;NU;AZ;KCL;ATI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";London", + "aff_country_unique_index": "0;0;0;1;0;0;0;0+0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.emnlp-industry.60", + "title": "PILE: Pairwise Iterative Logits Ensemble for Multi-Teacher Labeled Distillation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Pre-trained language models have become a crucial part of ranking systems and achieved very impressive effects recently. To maintain high performance while keeping efficient computations, knowledge distillation is widely used. In this paper, we focus on two key questions in knowledge distillation for ranking models: 1) how to ensemble knowledge from multi-teacher; 2) how to utilize the label information of data in the distillation process. We propose a unified algorithm called Pairwise Iterative Logits Ensemble (PILE) to tackle these two questions simultaneously. PILE ensembles multi-teacher logits supervised by label information in an iterative way and achieved competitive performance in both offline and online experiments. 
The proposed method has been deployed in a real-world commercial search system.", + "author": "Lianshang Cai; Linhao Zhang; Dehong Ma; Jun Fan; Daiting Shi; Yi Wu; Zhicong Cheng; Simiu Gu; Dawei Yin", + "authorids": "/l/lianshang-cai/; /l/linhao-zhang/; /d/dehong-ma/; /j/jun-fan/; /d/daiting-shi/; /y/yi-wu/; /z/zhicong-cheng/; /s/simiu-gu/; /d/dawei-yin/", + "bibtex": "@inproceedings{cai-etal-2022-pile,\n title = \"{PILE}: Pairwise Iterative Logits Ensemble for Multi-Teacher Labeled Distillation\",\n author = \"Cai, Lianshang and\n Zhang, Linhao and\n Ma, Dehong and\n Fan, Jun and\n Shi, Daiting and\n Wu, Yi and\n Cheng, Zhicong and\n Gu, Simiu and\n Yin, Dawei\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.60/\",\n doi = \"10.18653/v1/2022.emnlp-industry.60\",\n pages = \"587--595\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.60.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.60/", + "pdf_size": 539494, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17744007434349178972&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China + ACM", + "aff_domain": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;acm.org", + "email": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;acm.org", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0+1", + 
"aff_unique_norm": "Baidu Inc.;Association for Computing Machinery", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.baidu.com;https://www.acm.org", + "aff_unique_abbr": "Baidu;ACM", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-industry.52", + "title": "PLATO-Ad: A Unified Advertisement Text Generation Framework with Multi-Task Prompt Learning", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Online advertisement text generation aims at generating attractive and persuasive text ads to appeal to users clicking ads or purchasing products. While pretraining-based models have achieved remarkable success in generating high-quality text ads, some challenges still remain, such as ad generation in low-resource scenarios and training efficiency for multiple ad tasks. In this paper, we propose a novel unified text ad generation framework with multi-task prompt learning, called PLATO-Ad, totackle these problems. Specifically, we design a three-phase transfer learning mechanism to tackle the low-resource ad generation problem. Furthermore, we present a novel multi-task prompt learning mechanism to efficiently utilize a single lightweight model to solve multiple ad generation tasks without loss of performance compared to training a separate model for each task. Finally, we conduct offline and online evaluations and experiment results show that PLATO-Ad significantly outperforms the state-of-the-art on both offline and online metrics. 
PLATO-Ad has been deployed in a leading advertising platform with 3.5% CTR improvement on search ad descriptions and 10.4% CTR improvement on feed ad titles.", + "author": "Zeyang Lei; Chao Zhang; Xinchao Xu; Wenquan Wu; Zheng-yu Niu; Hua Wu; Haifeng Wang; Yi Yang; Shuanglong Li", + "authorids": "/z/zeyang-lei/; /c/chao-zhang-tu/; /x/xinchao-xu/; /w/wenquan-wu/; /z/zheng-yu-niu/; /h/hua-wu/; /h/haifeng-wang/; /y/yi-yang/; /s/shuanglong-li/", + "bibtex": "@inproceedings{lei-etal-2022-plato,\n title = \"{PLATO}-Ad: A Unified Advertisement Text Generation Framework with Multi-Task Prompt Learning\",\n author = \"Lei, Zeyang and\n Zhang, Chao and\n Xu, Xinchao and\n Wu, Wenquan and\n Niu, Zheng-yu and\n Wu, Hua and\n Wang, Haifeng and\n Yang, Yi and\n Li, Shuanglong\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.52/\",\n doi = \"10.18653/v1/2022.emnlp-industry.52\",\n pages = \"512--520\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.52.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.52/", + "pdf_size": 849328, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9329866860610190922&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China", + "aff_domain": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", + "email": "baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com;baidu.com", 
+ "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Baidu Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.86", + "title": "PLM-based World Models for Text-based Games", + "track": "main", + "status": "Main", + "award": false, + "abstract": "World models have improved the ability of reinforcement learning agents to operate in a sample efficient manner, by being trained to predict plausible changes in the underlying environment. As the core tasks of world models are future prediction and commonsense understanding, our claim is that pre-trained language models (PLMs) already provide a strong base upon which to build world models. Worldformer is a recently proposed world model for text-based game environments, based only partially on PLM and transformers. Our distinction is to fully leverage PLMs as actionable world models in text-based game environments, by reformulating generation as constrained decoding which decomposes actions into verb templates and objects. We show that our model improves future valid action prediction and graph change prediction. 
Additionally, we show that our model better reflects commonsense than standard PLM.", + "author": "Minsoo Kim; Yeonjoon Jung; Dohyeon Lee; Seung-won Hwang", + "authorids": "/m/minsoo-kim/; /y/yeonjoon-jung/; /d/dohyeon-lee/; /s/seung-won-hwang/", + "bibtex": "@inproceedings{kim-etal-2022-plm,\n title = \"{PLM}-based World Models for Text-based Games\",\n author = \"Kim, Minsoo and\n Jung, Yeonjoon and\n Lee, Dohyeon and\n Hwang, Seung-won\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.86/\",\n doi = \"10.18653/v1/2022.emnlp-main.86\",\n pages = \"1324--1341\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.86.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.86/", + "pdf_size": 484967, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1124266961230446094&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Interdisciplinary Program in Artificial Intelligence, Seoul National University; Department of Artificial Intelligence, Yonsei University; Department of Computer Science and Engineering, Seoul National University; Department of Computer Science and Engineering, Seoul National University", + "aff_domain": "snu.ac.kr;yonsei.ac.kr;snu.ac.kr;snu.ac.kr", + "email": "snu.ac.kr;yonsei.ac.kr;snu.ac.kr;snu.ac.kr", + "github": "https://github.com/mnskim/awm-bart", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Seoul National University;Yonsei University", + "aff_unique_dep": "Interdisciplinary Program in Artificial Intelligence;Department of Artificial Intelligence", + "aff_unique_url": "https://www.snu.ac.kr;https://www.yonsei.ac.kr", + 
"aff_unique_abbr": "SNU;Yonsei", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seoul;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.373", + "title": "PLOG: Table-to-Logic Pretraining for Logical Table-to-Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Logical table-to-text generation is a task that involves generating logically faithful sentences from tables, which requires models to derive logical-level facts from table records via logical inference. It raises a new challenge on the logical-level content planning of table-to-text models. However, directly learning the logical inference knowledge from table-text pairs is very difficult for neural models because of the ambiguity of natural language and the scarcity of parallel data. Hence even large-scale pre-trained language models present low logical fidelity on logical table-to-text. In this work, we propose a Pretrained Logical Form Generator (PLOG) framework to improve generation fidelity. Specifically, PLOG is first pretrained on a table-to-logical-form generation (table-to-logic) task, then finetuned on downstream table-to-text tasks. The logical forms are formally defined with unambiguous semantics. Hence we can collect a large amount of accurate logical forms from tables without human annotation. In addition, PLOG can learn logical inference from table-logic pairs much more reliably than from table-text pairs. To evaluate our model, we further collect a controlled logical table-to-text dataset CONTLOG based on an existing dataset. 
On two benchmarks, LOGICNLG and CONTLOG, PLOG outperforms strong baselines by a large margin on the logical fidelity, demonstrating the effectiveness of table-to-logic pretraining.", + "author": "Ao Liu; Haoyu Dong; Naoaki Okazaki; Shi Han; Dongmei Zhang", + "authorids": "/a/ao-liu/; /h/haoyu-dong/; /n/naoaki-okazaki/; /s/shi-han/; /d/dongmei-zhang/", + "bibtex": "@inproceedings{liu-etal-2022-plog,\n title = \"{PLOG}: Table-to-Logic Pretraining for Logical Table-to-Text Generation\",\n author = \"Liu, Ao and\n Dong, Haoyu and\n Okazaki, Naoaki and\n Han, Shi and\n Zhang, Dongmei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.373/\",\n doi = \"10.18653/v1/2022.emnlp-main.373\",\n pages = \"5531--5546\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.373.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.373/", + "pdf_size": 1052406, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17025207095046941722&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Tokyo Institute of Technology; Microsoft Research + Tokyo Institute of Technology; Tokyo Institute of Technology; Microsoft Research; Microsoft Research", + "aff_domain": "nlp.c.titech.ac.jp;microsoft.com;c.titech.ac.jp;microsoft.com;microsoft.com", + "email": "nlp.c.titech.ac.jp;microsoft.com;c.titech.ac.jp;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+0;0;1;1", + "aff_unique_norm": "Tokyo Institute of Technology;Microsoft Corporation", + "aff_unique_dep": ";Microsoft Research", + "aff_unique_url": "https://www.titech.ac.jp;https://www.microsoft.com/en-us/research", 
+ "aff_unique_abbr": "Titech;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+0;0;1;1", + "aff_country_unique": "Japan;United States" + }, + { + "id": "2022.findings-emnlp.144", + "title": "PM2F2N: Patient Multi-view Multi-modal Feature Fusion Networks for Clinical Outcome Prediction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Clinical outcome prediction is critical to the condition prediction of patients and management of hospital capacities. There are two kinds of medical data, including time series signals recorded by various devices and clinical notes in electronic health records (EHR), which are used for two common prediction targets: mortality and length of stay. Traditional methods focused on utilizing time series data but ignored clinical notes. With the development of deep learning, natural language processing (NLP) and multi-modal learning methods are exploited to jointly model the time series and clinical notes with different modals. However, the existing methods failed to fuse the multi-modal features of patients from different views. Therefore, we propose the patient multi-view multi-modal feature fusion networks for clinical outcome prediction. Firstly, from patient inner view, we propose to utilize the co-attention module to enhance the fine-grained feature interaction between time series and clinical notes from each patient. Secondly, the patient outer view is the correlation between patients, which can be reflected by the structural knowledge in clinical notes. We exploit the structural information extracted from clinical notes to construct the patient correlation graph, and fuse patients\u2019 multi-modal features by graph neural networks (GNN). 
The experimental results on MIMIC-III benchmark demonstrate the superiority of our method.", + "author": "Ying Zhang; Baohang Zhou; Kehui Song; Xuhui Sui; Guoqing Zhao; Ning Jiang; Xiaojie Yuan", + "authorids": "/y/ying-zhang/; /b/baohang-zhou/; /k/kehui-song/; /x/xuhui-sui/; /g/guoqing-zhao/; /n/ning-jiang/; /x/xiaojie-yuan/", + "bibtex": "@inproceedings{zhang-etal-2022-pm2f2n,\n title = \"P$\\text{M}^2\\text{F}^2${N}: Patient Multi-view Multi-modal Feature Fusion Networks for Clinical Outcome Prediction\",\n author = \"Zhang, Ying and\n Zhou, Baohang and\n Song, Kehui and\n Sui, Xuhui and\n Zhao, Guoqing and\n Jiang, Ning and\n Yuan, Xiaojie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.144/\",\n doi = \"10.18653/v1/2022.findings-emnlp.144\",\n pages = \"1985--1994\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.144.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.144/", + "pdf_size": 1684526, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8008603160157187105&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 0, + "aff": "1College of Computer Science, Nankai University, Tianjin, China+2Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; 1College of Computer Science, Nankai University, Tianjin, China+2Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; 1College of Computer Science, Nankai University, Tianjin, China+2Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China; 1College of Computer Science, Nankai University, Tianjin, China+2Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, 
China; 3Mashang Consumer Finanace Co, Ltd; 3Mashang Consumer Finanace Co, Ltd; 1College of Computer Science, Nankai University, Tianjin, China+2Tianjin Key Laboratory of Network and Data Security Technology, Tianjin, China", + "aff_domain": "dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;msxf.com;msxf.com;nankai.edu.cn", + "email": "dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;dbis.nankai.edu.cn;msxf.com;msxf.com;nankai.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;2;0+1", + "aff_unique_norm": "Nankai University;Tianjin Key Laboratory of Network and Data Security Technology;3Mashang Consumer Finance Co, Ltd", + "aff_unique_dep": "College of Computer Science;Network and Data Security Technology;", + "aff_unique_url": "http://www.nankai.edu.cn;;", + "aff_unique_abbr": "Nankai;;", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.594", + "title": "POQue: Asking Participant-specific Outcome Questions for a Deeper Understanding of Complex Events", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge about outcomes is critical for complex event understanding but is hard to acquire.We show that by pre-identifying a participant in a complex event, crowdworkers are ableto (1) infer the collective impact of salient events that make up the situation, (2) annotate the volitional engagement of participants in causing the situation, and (3) ground theoutcome of the situation in state changes of the participants. By creating a multi-step interface and a careful quality control strategy, we collect a high quality annotated dataset of8K short newswire narratives and ROCStories with high inter-annotator agreement (0.74-0.96weighted Fleiss Kappa). 
Our dataset, POQUe (Participant Outcome Questions), enables theexploration and development of models that address multiple aspects of semantic understanding. Experimentally, we show that current language models lag behind human performance in subtle ways through our task formulations that target abstract and specific comprehension of a complex event, its outcome, and a participant\u2019s influence over the event culmination.", + "author": "Sai Vallurupalli; Sayontan Ghosh; Katrin Erk; Niranjan Balasubramanian; Francis Ferraro", + "authorids": "/s/sai-vallurupalli/; /s/sayontan-ghosh/; /k/katrin-erk/; /n/niranjan-balasubramanian/; /f/francis-ferraro/", + "bibtex": "@inproceedings{vallurupalli-etal-2022-poque,\n title = \"{POQ}ue: Asking Participant-specific Outcome Questions for a Deeper Understanding of Complex Events\",\n author = \"Vallurupalli, Sai and\n Ghosh, Sayontan and\n Erk, Katrin and\n Balasubramanian, Niranjan and\n Ferraro, Francis\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.594/\",\n doi = \"10.18653/v1/2022.emnlp-main.594\",\n pages = \"8674--8697\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.594.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.594/", + "pdf_size": 4932181, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14592137817560997976&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "University of Maryland, Baltimore County; Stony Brook University; University of Texas, Austin; Stony Brook University; University of Maryland, Baltimore County", + "aff_domain": "umbc.edu;cs.stonybrook.edu;utexas.edu;cs.stonybrook.edu;umbc.edu", + "email": 
"umbc.edu;cs.stonybrook.edu;utexas.edu;cs.stonybrook.edu;umbc.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;0", + "aff_unique_norm": "University of Maryland, Baltimore County;Stony Brook University;University of Texas at Austin", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.umbc.edu;https://www.stonybrook.edu;https://www.utexas.edu", + "aff_unique_abbr": "UMBC;SBU;UT Austin", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Baltimore County;;Austin", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.171", + "title": "PRINCE: Prefix-Masked Decoding for Knowledge Enhanced Sequence-to-Sequence Pre-Training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained Language Models (PLMs) have shown effectiveness in various Natural Language Processing (NLP) tasks. Denoising autoencoder is one of the most successful pre-training frameworks, learning to recompose the original text given a noise-corrupted one. The existing studies mainly focus on injecting noises into the input. This paper introduces a simple yet effective pre-training paradigm, equipped with a knowledge-enhanced decoder that predicts the next entity token with noises in the prefix, explicitly strengthening the representation learning of entities that span over multiple input tokens. Specifically, when predicting the next token within an entity, we feed masks into the prefix in place of some of the previous ground-truth tokens that constitute the entity. 
Our model achieves new state-of-the-art results on two knowledge-driven data-to-text generation tasks with up to 2% BLEU gains.", + "author": "Song Xu; Haoran Li; Peng Yuan; Youzheng Wu; Xiaodong He", + "authorids": "/s/song-xu/; /h/haoran-li/; /p/peng-yuan/; /y/youzheng-wu/; /x/xiaodong-he/", + "bibtex": "@inproceedings{xu-etal-2022-prince,\n title = \"{PRINCE}: Prefix-Masked Decoding for Knowledge Enhanced Sequence-to-Sequence Pre-Training\",\n author = \"Xu, Song and\n Li, Haoran and\n Yuan, Peng and\n Wu, Youzheng and\n He, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.171/\",\n doi = \"10.18653/v1/2022.emnlp-main.171\",\n pages = \"2675--2681\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.171.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.171/", + "pdf_size": 233928, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4023951191373817682&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "JD AI Research; JD AI Research; JD AI Research; JD AI Research; JD AI Research", + "aff_domain": "jd.com;jd.com; ; ; ", + "email": "jd.com;jd.com; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "JD AI Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jd.com", + "aff_unique_abbr": "JD AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.698", + "title": "PRO-CS : An Instance-Based Prompt Composition Technique for Code-Switched Tasks", + "track": "main", + "status": "Main", 
+ "award": false, + "abstract": "Code-switched (CS) data is ubiquitous in today\u2019s globalized world, but the dearth of annotated datasets in code-switching poses a significant challenge for learning diverse tasks across different language pairs. Parameter-efficient prompt-tuning approaches conditioned on frozen language models have shown promise for transfer learning in limited-resource setups. In this paper, we propose a novel instance-based prompt composition technique, PRO-CS, for CS tasks that combine language and task knowledge. We compare our approach with prompt-tuning and fine-tuning for code-switched tasks on 10 datasets across 4 language pairs. Our model outperforms the prompt-tuning approach by significant margins across all datasets and outperforms or remains at par with fine-tuning by using just 0.18% of total parameters. We also achieve competitive results when compared with the fine-tuned model in the low-resource cross-lingual and cross-task setting, indicating the effectiveness of our approach to incorporate new code-switched tasks.", + "author": "Srijan Bansal; Suraj Tripathi; Sumit Agarwal; Teruko Mitamura; Eric Nyberg", + "authorids": "/s/srijan-bansal/; /s/suraj-tripathi/; /s/sumit-agarwal/; /t/teruko-mitamura/; /e/eric-nyberg/", + "bibtex": "@inproceedings{bansal-etal-2022-pro,\n title = \"{PRO}-{CS} : An Instance-Based Prompt Composition Technique for Code-Switched Tasks\",\n author = \"Bansal, Srijan and\n Tripathi, Suraj and\n Agarwal, Sumit and\n Mitamura, Teruko and\n Nyberg, Eric\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.698/\",\n doi = \"10.18653/v1/2022.emnlp-main.698\",\n pages = \"10243--10255\"\n}", 
+ "pdf": "https://aclanthology.org/2022.emnlp-main.698.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.698/", + "pdf_size": 2838223, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=790029741228685661&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "email": "andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu;andrew.cmu.edu", + "github": "https://github.com/srijan-bansal/PRO-CS", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.505", + "title": "PaCo: Preconditions Attributed to Commonsense Knowledge", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Humans can seamlessly reason with circumstantial preconditions of commonsense knowledge. We understand that a glass is used for drinking water, unless the glass is broken or the water is toxic. Despite state-of-the-art (SOTA) language models\u2019 (LMs) impressive performance on inferring commonsense knowledge, it is unclear whether they understand the circumstantial preconditions. To address this gap, we propose a novel challenge of reasoning with circumstantial preconditions. 
We collect a dataset, called PaCo, consisting of 12.4 thousand preconditions of commonsense statements expressed in natural language. Based on this dataset, we create three canonical evaluation tasks and use them to examine the capability of existing LMs to understand situational preconditions. Our results reveal a 10-30% gap between machine and human performance on our tasks, which shows that reasoning with preconditions is an open challenge.", + "author": "Ehsan Qasemi; Filip Ilievski; Muhao Chen; Pedro Szekely", + "authorids": "/e/ehsan-qasemi/; /f/filip-ilievski/; /m/muhao-chen/; /p/pedro-szekely/", + "bibtex": "@inproceedings{qasemi-etal-2022-paco,\n title = \"{P}a{C}o: Preconditions Attributed to Commonsense Knowledge\",\n author = \"Qasemi, Ehsan and\n Ilievski, Filip and\n Chen, Muhao and\n Szekely, Pedro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.505/\",\n doi = \"10.18653/v1/2022.findings-emnlp.505\",\n pages = \"6781--6796\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.505.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.505/", + "pdf_size": 723939, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5565008018143801700&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, University of Southern California + Information Sciences Institute, University of Southern California; Department of Computer Science, University of Southern California + Information Sciences Institute, University of Southern California; Department of Computer Science, University of Southern California + Information Sciences Institute, University of Southern 
California; Department of Computer Science, University of Southern California + Information Sciences Institute, University of Southern California", + "aff_domain": "usc.edu;isi.edu;usc.edu;usc.edu", + "email": "usc.edu;isi.edu;usc.edu;usc.edu", + "github": "https://github.com/luka-group/PaCo", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0+0;0+0;0+0;0+0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.358", + "title": "Pair-Based Joint Encoding with Relational Graph Convolutional Networks for Emotion-Cause Pair Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Emotion-cause pair extraction (ECPE) aims to extract emotion clauses and corresponding cause clauses, which have recently received growing attention. Previous methods sequentially encode features with a specified order. They first encode the emotion and cause features for clause extraction and then combine them for pair extraction. This lead to an imbalance in inter-task feature interaction where features extracted later have no direct contact with the former. To address this issue, we propose a novel **P**air-**B**ased **J**oint **E**ncoding (**PBJE**) network, which generates pairs and clauses features simultaneously in a joint feature encoding manner to model the causal relationship in clauses. PBJE can balance the information flow among emotion clauses, cause clauses and pairs. From a multi-relational perspective, we construct a heterogeneous undirected graph and apply the Relational Graph Convolutional Network (RGCN) to capture the multiplex relationship between clauses and the relationship between pairs and clauses. 
Experimental results show that PBJE achieves state-of-the-art performance on the Chinese benchmark corpus.", + "author": "Junlong Liu; Xichen Shang; Qianli Ma", + "authorids": "/j/junlong-liu/; /x/xichen-shang/; /q/qianli-ma/", + "bibtex": "@inproceedings{liu-etal-2022-pair,\n title = \"Pair-Based Joint Encoding with Relational Graph Convolutional Networks for Emotion-Cause Pair Extraction\",\n author = \"Liu, Junlong and\n Shang, Xichen and\n Ma, Qianli\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.358/\",\n doi = \"10.18653/v1/2022.emnlp-main.358\",\n pages = \"5339--5351\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.358.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.358/", + "pdf_size": 545356, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10411643355228991075&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China; School of Computer Science and Engineering, South China University of Technology, Guangzhou, China", + "aff_domain": "foxmail.com; ;scut.edu.cn", + "email": "foxmail.com; ;scut.edu.cn", + "github": "https://github.com/tutuDoki/PBJE-ECPE", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "South China University of Technology", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.scut.edu.cn", + "aff_unique_abbr": "SCUT", + "aff_campus_unique_index": "0;0;0", + 
"aff_campus_unique": "Guangzhou", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.461", + "title": "ParaMac: A General Unsupervised Paraphrase Generation Framework Leveraging Semantic Constraints and Diversifying Mechanisms", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Paraphrase generation reflects the ability to understand the meaning from the language surface form and rephrase it to other expressions. Recent paraphrase generation works have paid attention to unsupervised approaches based on Pre-trained Language Models (PLMs) to avoid heavy reliance on parallel data by utilizing PLMs\u2019 generation ability. However, the generated pairs of existing unsupervised methods are usually weak either in semantic equivalence or expression diversity. In this paper, we present a novel unsupervised paraphrase generation framework called Paraphrase Machine. By employing multi-aspect equivalence constraints and multi-granularity diversifying mechanisms, Paraphrase Machine is able to achieve good semantic equivalence and expressive diversity, producing a high-quality unsupervised paraphrase dataset. Based on this dataset, we train a general paraphrase model, which can be directly applied to rewrite the input sentence of various domains without any fine-tuning, and achieves substantial gains of 9.1% and 3.3% absolutely in BLEU score over previous SOTA on Quora and MSCOCO. By further fine-tuning our model with domain-specific training sets, the improvement can be increased to even 18.0% and 4.6%. 
Most importantly, by applying it to language understanding and generation tasks under the low-resource setting, we demonstrate that our model can serve as a universal data augmentor to boost the few-shot performance (e.g., average 2.0% gain on GLUE).", + "author": "Jinxin Liu; Jiaxin Shi; Ji Qi; Lei Hou; Juanzi Li; Qi Tian", + "authorids": "/j/jinxin-liu/; /j/jiaxin-shi/; /j/ji-qi/; /l/lei-hou/; /j/juanzi-li/; /q/qi-tian/", + "bibtex": "@inproceedings{liu-etal-2022-paramac,\n title = \"{P}ara{M}ac: A General Unsupervised Paraphrase Generation Framework Leveraging Semantic Constraints and Diversifying Mechanisms\",\n author = \"Liu, Jinxin and\n Shi, Jiaxin and\n Qi, Ji and\n Hou, Lei and\n Li, Juanzi and\n Tian, Qi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.461/\",\n doi = \"10.18653/v1/2022.findings-emnlp.461\",\n pages = \"6193--6206\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.461.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.461/", + "pdf_size": 447652, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11799558380953057595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, BNRist + THU - Siemens Ltd., China Joint Research Center for Industrial Intelligence and IoT, Tsinghua University, Beijing, China; Huawei Cloud Computing Technologies Co., Ltd.; Department of Computer Science and Technology, BNRist + THU - Siemens Ltd., China Joint Research Center for Industrial Intelligence and IoT, Tsinghua University, Beijing, China; Department of Computer Science and Technology, BNRist + THU - Siemens Ltd., China Joint Research 
Center for Industrial Intelligence and IoT, Tsinghua University, Beijing, China; Department of Computer Science and Technology, BNRist + THU - Siemens Ltd., China Joint Research Center for Industrial Intelligence and IoT, Tsinghua University, Beijing, China; Huawei Cloud Computing Technologies Co., Ltd.", + "aff_domain": "mails.tsinghua.edu.cn; ; ; ;mail.tsinghua.edu.cn; ", + "email": "mails.tsinghua.edu.cn; ; ; ;mail.tsinghua.edu.cn; ", + "github": "https://github.com/Matthewlliu/UnsupervisedParaphrase", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;0+1;0+1;0+1;2", + "aff_unique_norm": "BNRist;Tsinghua University;Huawei Cloud Computing Technologies Co., Ltd.", + "aff_unique_dep": "Department of Computer Science and Technology;China Joint Research Center for Industrial Intelligence and IoT;", + "aff_unique_url": ";https://www.tsinghua.edu.cn;https://www.huawei.com/en/cloud", + "aff_unique_abbr": ";THU;Huawei Cloud", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "1;1;1;1;1;1", + "aff_country_unique": ";China" + }, + { + "id": "2022.emnlp-main.479", + "title": "ParaTag: A Dataset of Paraphrase Tagging for Fine-Grained Labels, NLG Evaluation, and Data Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Paraphrase identification has been formulated as a binary classification task to decide whether two sentences hold a paraphrase relationship. Existing paraphrase datasets only annotate a binary label for each sentence pair. However, after a systematical analysis of existing paraphrase datasets, we found that the degree of paraphrase cannot be well characterized by a single binary label. And the criteria of paraphrase are not even consistent within the same dataset. We hypothesize that such issues would limit the effectiveness of paraphrase models trained on these data. 
To this end, we propose a novel fine-grained paraphrase annotation schema that labels the minimum spans of tokens in a sentence that don\u2019t have the corresponding paraphrases in the other sentence. Under this setting, we frame paraphrasing as a sequence tagging task. We collect 30k sentence pairs in English with the new annotation schema, resulting in the ParaTag dataset. In addition to reporting baseline results on ParaTag using state-of-art language models, we show that ParaTag is especially useful for training an automatic scorer for language generation evaluation. Finally, we train a paraphrase generation model from ParaTag and achieve better data augmentation performance on the GLUE benchmark than other public paraphrasing datasets.", + "author": "Shuohang Wang; Ruochen Xu; Yang Liu; Chenguang Zhu; Michael Zeng", + "authorids": "/s/shuohang-wang/; /r/ruochen-xu/; /y/yang-liu/; /c/chenguang-zhu/; /m/michael-zeng/", + "bibtex": "@inproceedings{wang-etal-2022-paratag,\n title = \"{P}ara{T}ag: A Dataset of Paraphrase Tagging for Fine-Grained Labels, {NLG} Evaluation, and Data Augmentation\",\n author = \"Wang, Shuohang and\n Xu, Ruochen and\n Liu, Yang and\n Zhu, Chenguang and\n Zeng, Michael\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.479/\",\n doi = \"10.18653/v1/2022.emnlp-main.479\",\n pages = \"7111--7122\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.479.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.479/", + "pdf_size": 411941, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=700678677502090657&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Microsoft 
Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research; Microsoft Azure Cognitive Services Research", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/ParaTag", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Microsoft", + "aff_unique_dep": "Azure Cognitive Services Research", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.514", + "title": "Parameter-Efficient Tuning Makes a Good Classification Head", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In recent years, pretrained models revolutionized the paradigm of natural language understanding (NLU), where we append a randomly initialized classification head after the pretrained backbone, e.g. BERT, and finetune the whole model. As the pretrained backbone makes a major contribution to the improvement, we naturally expect a good pretrained classification head can also benefit the training. However, the final-layer output of the backbone, i.e. the input of the classification head, will change greatly during finetuning, making the usual head-only pretraining ineffective. In this paper, we find that parameter-efficient tuning makes a good classification head, with which we can simply replace the randomly initialized heads for a stable performance gain. 
Our experiments demonstrate that the classification head jointly pretrained with parameter-efficient tuning consistently improves the performance on 9 tasks in GLUE and SuperGLUE.", + "author": "Zhuoyi Yang; Ming Ding; Yanhui Guo; Qingsong Lv; Jie Tang", + "authorids": "/z/zhuoyi-yang/; /m/ming-ding/; /y/yanhui-guo/; /q/qingsong-lv/; /j/jie-tang/", + "bibtex": "@inproceedings{yang-etal-2022-parameter,\n title = \"Parameter-Efficient Tuning Makes a Good Classification Head\",\n author = \"Yang, Zhuoyi and\n Ding, Ming and\n Guo, Yanhui and\n Lv, Qingsong and\n Tang, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.514/\",\n doi = \"10.18653/v1/2022.emnlp-main.514\",\n pages = \"7576--7586\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.514.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.514/", + "pdf_size": 2469603, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12327311704196424748&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "https://github.com/THUDM/Efficient-Head-Finetuning", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.291", + "title": "Parameter-free Automatically Prompting: A Latent Pseudo Label Mapping Model for Prompt-based Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompt-based learning has achieved excellent performance in few-shot learning by mapping the outputs of the pre-trained language model to the labels with the help of a label mapping component. 
Existing manual label mapping (MLM) methods achieve good results but heavily rely on expensive human knowledge. Automatic label mapping (ALM) methods that learn the mapping functions with extra parameters have shown their potentiality. However, no effective ALM model comparable to MLM methods is developed yet due to the limited data. In this paper, we propose a Latent Pseudo Label Mapping (LPLM) method that optimizes the label mapping without human knowledge and extra parameters. LPLM is built upon a probabilistic latent model and is iteratively self-improved with the EM-style algorithm. The empirical results demonstrate that our LPLM method is superior to the mainstream ALM methods and significantly outperforms the SOTA method in few-shot classification tasks. Moreover, LPLM also shows impressively better performance than the vanilla MLM method which requires extra task-specific prior knowledge.", + "author": "Jirui Qi; Richong Zhang; Junfan Chen; Jaein Kim; Yongyi Mao", + "authorids": "/j/jirui-qi/; /r/richong-zhang/; /j/junfan-chen/; /j/jaein-kim/; /y/yongyi-mao/", + "bibtex": "@inproceedings{qi-etal-2022-parameter,\n title = \"Parameter-free Automatically Prompting: A Latent Pseudo Label Mapping Model for Prompt-based Learning\",\n author = \"Qi, Jirui and\n Zhang, Richong and\n Chen, Junfan and\n Kim, Jaein and\n Mao, Yongyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.291/\",\n doi = \"10.18653/v1/2022.findings-emnlp.291\",\n pages = \"3952--3962\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.291.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.291/", + "pdf_size": 1977676, + "gs_citation": 0, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?q=related:3BVqhNJqPbQJ:scholar.google.com/&scioq=Parameter-free+Automatically+Prompting:+A+Latent+Pseudo+Label+Mapping+Model+for+Prompt-based+Learning&hl=en&as_sdt=0,5", + "gs_version_total": 0, + "aff": "SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China; School of Electrical Engineering and Computer Science, University of Ottawa, Canada", + "aff_domain": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;2", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Ottawa", + "aff_unique_dep": "SKLSDE;;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.uottawa.ca", + "aff_unique_abbr": ";;U Ottawa", + "aff_campus_unique_index": "0;0;0;0;2", + "aff_campus_unique": "Beijing;;Ottawa", + "aff_country_unique_index": "0;0+0;0;0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.191", + "title": "Partially-Random Initialization: A Smoking Gun for Binarization Hypothesis of BERT", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In the past few years, pre-trained BERT has become one of the most popular deep-learning language models due to their remarkable performance in natural language processing (NLP) tasks. However, the superior performance of BERT comes at the cost of high computational and memory complexity, hindering its envisioned widespread deployment in edge devices with limited computing resources. Binarization can alleviate these limitations by reducing storage requirements and improving computing performance. 
However, obtaining a comparable accuracy performance for binary BERT w.r.t. its full-precision counterpart is still a difficult task. We observe that direct binarization of pre-trained BERT provides a poor initialization during the fine-tuning phase, making the model incapable of achieving a decent accuracy on downstream tasks. Based on this observation, we put forward the following hypothesis: partially randomly-initialized BERT with binary weights and activations can reach to a decent accuracy performance by distilling knowledge from the its full-precision counterpart. We show that BERT with pre-trained embedding layer and randomly-initialized encoder is a smoking gun for this hypothesis. We identify the smoking gun through a series of experiments and show that it yields a new set of state-of-the-art results on the GLUE and SQuAD benchmarks.", + "author": "Arash Ardakani", + "authorids": "/a/arash-ardakani/", + "bibtex": "@inproceedings{ardakani-2022-partially,\n title = \"Partially-Random Initialization: A Smoking Gun for Binarization Hypothesis of {BERT}\",\n author = \"Ardakani, Arash\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.191/\",\n doi = \"10.18653/v1/2022.findings-emnlp.191\",\n pages = \"2603--2612\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.191.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.191/", + "pdf_size": 316242, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11323423551836377241&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "", + "aff_domain": "", + "email": 
"", + "github": "", + "project": "", + "author_num": 1 + }, + { + "id": "2022.findings-emnlp.443", + "title": "Partitioned Gradient Matching-based Data Subset Selection for Compute-Efficient Robust ASR Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Training state-of-the-art ASR systems such as RNN-T often has a high associated financial and environmental cost. Training with a subset of training data could mitigate this problem if the subset selected could achieve on-par performance with training with the entire dataset. Although there are many data subset selection(DSS) algorithms, direct application to the RNN-T is difficult, especially the DSS algorithms that are adaptive and use learning dynamics such as gradients, as RNN-T tend to have gradients with a significantly larger memory footprint. In this paper, we propose Partitioned Gradient Matching (PGM) a novel distributable DSS algorithm, suitable for massive datasets like those used to train RNN-T. Through extensive experiments on Librispeech 100H and Librispeech 960H, we show that PGM achieves between 3x to 6x speedup with only a very small accuracy degradation (under 1% absolute WER difference). 
In addition, we demonstrate similar results for PGM even in settings where the training data is corrupted with noise.", + "author": "Ashish Mittal; Durga Sivasubramanian; Rishabh Iyer; Preethi Jyothi; Ganesh Ramakrishnan", + "authorids": "/a/ashish-mittal/; /d/durga-sivasubramanian/; /r/rishabh-iyer/; /p/preethi-jyothi/; /g/ganesh-ramakrishnan/", + "bibtex": "@inproceedings{mittal-etal-2022-partitioned,\n title = \"Partitioned Gradient Matching-based Data Subset Selection for Compute-Efficient Robust {ASR} Training\",\n author = \"Mittal, Ashish and\n Sivasubramanian, Durga and\n Iyer, Rishabh and\n Jyothi, Preethi and\n Ramakrishnan, Ganesh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.443/\",\n doi = \"10.18653/v1/2022.findings-emnlp.443\",\n pages = \"5999--6010\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.443.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.443/", + "pdf_size": 523812, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5787142191748585863&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 6, + "aff": "IBM Research, India+Indian Institute of Technology Bombay, Mumbai, India; Indian Institute of Technology Bombay, Mumbai, India; The University of Texas at Dallas, Dallas, USA; Indian Institute of Technology Bombay, Mumbai, India; Indian Institute of Technology Bombay, Mumbai, India", + "aff_domain": "in.ibm.com;cse.iitb.ac.in;utdallas.edu;cse.iitb.ac.in;cse.iitb.ac.in", + "email": "in.ibm.com;cse.iitb.ac.in;utdallas.edu;cse.iitb.ac.in;cse.iitb.ac.in", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;2;1;1", + "aff_unique_norm": "IBM 
Research;Indian Institute of Technology Bombay;The University of Texas at Dallas", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ibm.com/research;https://www.iitb.ac.in;https://www.utdallas.edu", + "aff_unique_abbr": "IBM;IIT Bombay;UT Dallas", + "aff_campus_unique_index": "1;1;2;1;1", + "aff_campus_unique": ";Mumbai;Dallas", + "aff_country_unique_index": "0+0;0;1;0;0", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.260", + "title": "Passage-Mask: A Learnable Regularization Strategy for Retriever-Reader Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Retriever-reader models achieve competitive performance across many different NLP tasks such as open question answering and dialogue conversations. In this work, we notice these models easily overfit the top-rank retrieval passages and standard training fails to reason over the entire retrieval passages. We introduce a learnable passage mask mechanism which desensitizes the impact from the top-rank retrieval passages and prevents the model from overfitting. Controlling the gradient variance with fewer mask candidates and selecting the mask candidates with one-shot bi-level optimization, our learnable regularization strategy enforces the answer generation to focus on the entire retrieval passages. Experiments on different tasks across open question answering, dialogue conversation, and fact verification show that our method consistently outperforms its baselines. 
Extensive experiments and ablation studies demonstrate that our method can be general, effective, and beneficial for many NLP tasks.", + "author": "Shujian Zhang; Chengyue Gong; Xingchao Liu", + "authorids": "/s/shujian-zhang/; /c/chengyue-gong/; /x/xingchao-liu/", + "bibtex": "@inproceedings{zhang-etal-2022-passage,\n title = \"Passage-Mask: A Learnable Regularization Strategy for Retriever-Reader Models\",\n author = \"Zhang, Shujian and\n Gong, Chengyue and\n Liu, Xingchao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.260/\",\n doi = \"10.18653/v1/2022.emnlp-main.260\",\n pages = \"3931--3943\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.260.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.260/", + "pdf_size": 351468, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4094178214671973746&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The University of Texas at Austin; The University of Texas at Austin; The University of Texas at Austin", + "aff_domain": "utexas.edu;utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Texas at Austin", + "aff_unique_dep": "", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.446", + "title": "PcMSP: A Dataset for Scientific Action Graphs Extraction from Polycrystalline Materials 
Synthesis Procedure Text", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Scientific action graphs extraction from materials synthesis procedures is important for reproducible research, machine automation, and material prediction. But the lack of annotated data has hindered progress in this field. We demonstrate an effort to annotate Polycrystalline Materials Synthesis Procedures PcMSP from 305 open access scientific articles for the construction of synthesis action graphs. This is a new dataset for material science information extraction that simultaneously contains the synthesis sentences extracted from the experimental paragraphs, as well as the entity mentions and intra-sentence relations. A two-step human annotation and inter-annotator agreement study guarantee the high quality of the PcMSP corpus. We introduce four natural language processing tasks: sentence classification, named entity recognition, relation classification, and joint extraction of entities and relations. Comprehensive experiments validate the effectiveness of several state-of-the-art models for these challenges while leaving large space for improvement. We also perform the error analysis and point out some unique challenges that require further investigation. 
We will release our annotation scheme, the corpus, and codes to the research community to alleviate the scarcity of labeled data in this domain.", + "author": "Xianjun Yang; Ya Zhuo; Julia Zuo; Xinlu Zhang; Stephen Wilson; Linda Petzold", + "authorids": "/x/xianjun-yang/; /y/ya-zhuo/; /j/julia-zuo/; /x/xinlu-zhang/; /s/stephen-wilson/; /l/linda-petzold/", + "bibtex": "@inproceedings{yang-etal-2022-pcmsp,\n title = \"{P}c{MSP}: A Dataset for Scientific Action Graphs Extraction from Polycrystalline Materials Synthesis Procedure Text\",\n author = \"Yang, Xianjun and\n Zhuo, Ya and\n Zuo, Julia and\n Zhang, Xinlu and\n Wilson, Stephen and\n Petzold, Linda\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.446/\",\n doi = \"10.18653/v1/2022.findings-emnlp.446\",\n pages = \"6033--6046\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.446.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.446/", + "pdf_size": 594558, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1968414204907517142&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science+University of California, Santa Barbara; Department of Materials Science and Engineering+University of California, Santa Barbara; Department of Materials Science and Engineering+University of California, Santa Barbara; Department of Computer Science+University of California, Santa Barbara; Department of Materials Science and Engineering+University of California, Santa Barbara; Department of Computer Science+University of California, Santa Barbara", + "aff_domain": "ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu", + 
"email": "ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu;ucsb.edu", + "github": "https://github.com/Xianjun-Yang/PcMSP", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2+1;2+1;0+1;2+1;0+1", + "aff_unique_norm": "Unknown Institution;University of California, Santa Barbara;University of Illinois at Urbana-Champaign", + "aff_unique_dep": "Department of Computer Science;;Department of Materials Science and Engineering", + "aff_unique_url": ";https://www.ucsb.edu;https://mse.illinois.edu/", + "aff_unique_abbr": ";UCSB;UIUC MSE", + "aff_campus_unique_index": "1;2+1;2+1;1;2+1;1", + "aff_campus_unique": ";Santa Barbara;Urbana-Champaign", + "aff_country_unique_index": "1;1+1;1+1;1;1+1;1", + "aff_country_unique": ";United States" + }, + { + "id": "2022.emnlp-main.646", + "title": "Perturbation Augmentation for Fairer NLP", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Unwanted and often harmful social biases are becoming ever more salient in NLP research, affecting both models and datasets. In this work, we ask whether training on demographically perturbed data leads to fairer language models. We collect a large dataset of human annotated text perturbations and train a neural perturbation model, which we show outperforms heuristic alternatives. We find that (i) language models (LMs) pre-trained on demographically perturbed corpora are typically more fair, and (ii) LMs finetuned on perturbed GLUE datasets exhibit less demographic bias on downstream tasks, and (iii) fairness improvements do not come at the expense of performance on downstream tasks. Lastly, we discuss outstanding questions about how best to evaluate the (un)fairness of large language models. 
We hope that this exploration of neural demographic perturbation will help drive more improvement towards fairer NLP.", + "author": "Rebecca Qian; Candace Ross; Jude Fernandes; Eric Michael Smith; Douwe Kiela; Adina Williams", + "authorids": "/r/rebecca-qian/; /c/candace-ross/; /j/jude-fernandes/; /e/eric-michael-smith/; /d/douwe-kiela/; /a/adina-williams/", + "bibtex": "@inproceedings{qian-etal-2022-perturbation,\n title = \"Perturbation Augmentation for Fairer {NLP}\",\n author = \"Qian, Rebecca and\n Ross, Candace and\n Fernandes, Jude and\n Smith, Eric Michael and\n Kiela, Douwe and\n Williams, Adina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.646/\",\n doi = \"10.18653/v1/2022.emnlp-main.646\",\n pages = \"9496--9521\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.646.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.646/", + "pdf_size": 1000914, + "gs_citation": 103, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6132566873059432390&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Facebook AI Research; Facebook AI Research; Facebook AI Research; Facebook AI Research; Hugging Face; Facebook AI Research + Hugging Face", + "aff_domain": "fb.com; ; ; ; ;fb.com", + "email": "fb.com; ; ; ; ;fb.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0+1", + "aff_unique_norm": "Facebook;Hugging Face", + "aff_unique_dep": "Facebook AI Research;", + "aff_unique_url": "https://research.facebook.com;https://huggingface.co", + "aff_unique_abbr": "FAIR;Hugging Face", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.67", + "title": "Plug-and-Play VQA: Zero-shot VQA by Conjoining Large Pretrained Models with Zero Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Visual question answering (VQA) is a hallmark of vision and language reasoningand a challenging task under the zero-shot setting.We propose Plug-and-Play VQA (PNP-VQA),a modular framework for zero-shot VQA.In contrast to most existing works, which require substantial adaptation of pretrained language models (PLMs) for the vision modality,PNP-VQA requires no additional training of the PLMs.Instead, we propose to use natural language and network interpretation as an intermediate representation that glues pretrained models together. We first generate question-guided informative image captions,and pass the captions to a PLM as context for question answering.Surpassing end-to-end trained baselines, PNP-VQA achieves state-of-the-art results on zero-shot VQAv2 and GQA. With 11B parameters, it outperforms the 80B-parameter Flamingo model by 8.5% on VQAv2. With 738M PLM parameters, PNP-VQA achieves an improvement of 9.1% on GQA over FewVLM with 740M PLM parameters.", + "author": "Anthony Meng Huat Tiong; Junnan Li; Boyang Li; Silvio Savarese; Steven C.H. 
Hoi", + "authorids": "/a/anthony-meng-huat-tiong/; /j/junnan-li/; /b/boyang-li/; /s/silvio-savarese/; /s/steven-c-h-hoi/", + "bibtex": "@inproceedings{tiong-etal-2022-plug,\n title = \"Plug-and-Play {VQA}: Zero-shot {VQA} by Conjoining Large Pretrained Models with Zero Training\",\n author = \"Tiong, Anthony Meng Huat and\n Li, Junnan and\n Li, Boyang and\n Savarese, Silvio and\n Hoi, Steven C.H.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.67/\",\n doi = \"10.18653/v1/2022.findings-emnlp.67\",\n pages = \"951--967\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.67.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.67/", + "pdf_size": 8786376, + "gs_citation": 122, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3955706247035738413&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Salesforce Research; Salesforce Research; Nanyang Technological University, Singapore; Salesforce Research; Salesforce Research", + "aff_domain": "salesforce.com;salesforce.com;ntu.edu.sg;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;ntu.edu.sg;salesforce.com;salesforce.com", + "github": "https://github.com/salesforce/LAVIS/tree/main/projects/pnp-vqa", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "Salesforce;Nanyang Technological University", + "aff_unique_dep": "Salesforce Research;", + "aff_unique_url": "https://research.salesforce.com;https://www.ntu.edu.sg", + "aff_unique_abbr": "Salesforce;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United 
States;Singapore" + }, + { + "id": "2022.emnlp-main.733", + "title": "Pneg: Prompt-based Negative Response Generation for Dialogue Response Selection Task", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In retrieval-based dialogue systems, a response selection model acts as a ranker to select the most appropriate response among several candidates. However, such selection models tend to rely on context-response content similarity, which makes models vulnerable to adversarial responses that are semantically similar but not relevant to the dialogue context. Recent studies have shown that leveraging these adversarial responses as negative training samples is useful for improving the discriminating power of the selection model. Nevertheless, collecting human-written adversarial responses is expensive, and existing synthesizing methods often have limited scalability. To overcome these limitations, this paper proposes a simple but efficient method for generating adversarial negative responses leveraging a large-scale language model. Experimental results on dialogue selection tasks show that our method outperforms other methods of synthesizing adversarial negative responses. These results suggest that our method can be an effective alternative to human annotators in generating adversarial responses. 
Our code and dataset will be released if the paper is accepted.", + "author": "Nyoungwoo Lee; ChaeHun Park; Ho-Jin Choi; Jaegul Choo", + "authorids": "/n/nyoungwoo-lee/; /c/chaehun-park/; /h/ho-jin-choi/; /j/jaegul-choo/", + "bibtex": "@inproceedings{lee-etal-2022-pneg,\n title = \"Pneg: Prompt-based Negative Response Generation for Dialogue Response Selection Task\",\n author = \"Lee, Nyoungwoo and\n Park, ChaeHun and\n Choi, Ho-Jin and\n Choo, Jaegul\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.733/\",\n doi = \"10.18653/v1/2022.emnlp-main.733\",\n pages = \"10692--10703\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.733.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.733/", + "pdf_size": 469977, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15249676395818823642&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 5, + "aff": "Scatter Lab; KAIST AI; KAIST; KAIST AI", + "aff_domain": "scatterlab.co.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "scatterlab.co.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "https://github.com/leenw23/generating-negatives-by-gpt3", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Scatter Lab;Korea Advanced Institute of Science and Technology", + "aff_unique_dep": ";KAIST AI", + "aff_unique_url": ";https://www.kaist.edu", + "aff_unique_abbr": ";KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;1", + "aff_country_unique": ";South Korea" + }, + { + "id": "2022.findings-emnlp.268", + "title": "PoeLM: A Meter- and Rhyme-Controllable Language Model 
for Unsupervised Poetry Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Formal verse poetry imposes strict constraints on the meter and rhyme scheme of poems. Most prior work on generating this type of poetry uses existing poems for supervision, which are difficult to obtain for most languages and poetic forms. In this work, we propose an unsupervised approach to generate poems that follow any given meter and rhyme scheme, without requiring any poetic text for training. Our method works by splitting a regular, non-poetic corpus into phrases, prepending control codes that describe the length and end rhyme of each phrase, and training a transformer language model in the augmented corpus. The transformer learns to link the structure descriptor with the control codes to the number of lines, their length and their end rhyme. During inference, we build control codes for the desired meter and rhyme scheme, and condition our language model on them to generate formal verse poetry. 
Experiments in Spanish and Basque show that our approach is able to generate valid poems, which are often comparable in quality to those written by humans.", + "author": "Aitor Ormazabal; Mikel Artetxe; Manex Agirrezabal; Aitor Soroa; Eneko Agirre", + "authorids": "/a/aitor-ormazabal/; /m/mikel-artetxe/; /m/manex-agirrezabal/; /a/aitor-soroa/; /e/eneko-agirre/", + "bibtex": "@inproceedings{ormazabal-etal-2022-poelm,\n title = \"{P}oe{LM}: A Meter- and Rhyme-Controllable Language Model for Unsupervised Poetry Generation\",\n author = \"Ormazabal, Aitor and\n Artetxe, Mikel and\n Agirrezabal, Manex and\n Soroa, Aitor and\n Agirre, Eneko\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.268/\",\n doi = \"10.18653/v1/2022.findings-emnlp.268\",\n pages = \"3655--3670\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.268.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.268/", + "pdf_size": 285267, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15475043911298827422&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": "HiTZ Center, University of the Basque Country (UPV/EHU); Meta AI; University of Copenhagen; HiTZ Center, University of the Basque Country (UPV/EHU); HiTZ Center, University of the Basque Country (UPV/EHU)", + "aff_domain": "ehu.eus;meta.com;hum.ku.dk;ehu.eus;ehu.eus", + "email": "ehu.eus;meta.com;hum.ku.dk;ehu.eus;ehu.eus", + "github": "https://github.com/aitorormazabal/poetry_generation3655", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0", + "aff_unique_norm": "University of the Basque Country;Meta Platforms, Inc.;University of Copenhagen", + 
"aff_unique_dep": "HiTZ Center;Meta AI;", + "aff_unique_url": "https://www.ehu.eus/en;https://meta.com;https://www.ku.dk", + "aff_unique_abbr": "UPV/EHU;Meta;UCPH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0;0", + "aff_country_unique": "Spain;United States;Denmark" + }, + { + "id": "2022.emnlp-main.674", + "title": "Polyglot Prompt: Multilingual Multitask Prompt Training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper aims for a potential architectural improvement for multilingual learning and asks: Can different tasks from different languages be modeled in a monolithic framework, i.e. without any task/language-specific module? The benefit of achieving this could open new doors for future multilingual research, including allowing systems trained on low resources to be further assisted by other languages as well as other tasks. We approach this goal by developing a learning framework named Polyglot Prompting to exploit prompting methods for learning a unified semantic space for different languages and tasks with multilingual prompt engineering. We performed a comprehensive evaluation of 6 tasks, namely topic classification, sentiment classification, named entity recognition, question answering, natural language inference, and summarization, covering 24 datasets and 49 languages. The experimental results demonstrated the efficacy of multilingual multitask prompt-based learning and led to inspiring observations. We also present an interpretable multilingual evaluation methodology and show how the proposed framework, multilingual multitask prompt training, works. 
We release all datasets prompted in the best setting and code.", + "author": "Jinlan Fu; See-Kiong Ng; Pengfei Liu", + "authorids": "/j/jinlan-fu/; /s/see-kiong-ng/; /p/pengfei-liu/", + "bibtex": "@inproceedings{fu-etal-2022-polyglot,\n title = \"Polyglot Prompt: Multilingual Multitask Prompt Training\",\n author = \"Fu, Jinlan and\n Ng, See-Kiong and\n Liu, Pengfei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.674/\",\n doi = \"10.18653/v1/2022.emnlp-main.674\",\n pages = \"9919--9935\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.674.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.674/", + "pdf_size": 1084527, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16532836813039969220&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "NUS; NUS; CMU & Inspired Cognition", + "aff_domain": "nus.edu.sg;nus.edu.sg;gmail.com", + "email": "nus.edu.sg;nus.edu.sg;gmail.com", + "github": "https://github.com/jinlanfu/Polyglot_Prompt", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "National University of Singapore;Carnegie Mellon University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.nus.edu.sg;https://www.cmu.edu", + "aff_unique_abbr": "NUS;CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "2022.emnlp-main.764", + "title": "Pre-training Language Models with Deterministic Factual Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Previous works show that Pre-trained Language 
Models (PLMs) can capture factual knowledge. However, some analyses reveal that PLMs fail to perform it robustly, e.g., being sensitive to the changes of prompts when extracting factual knowledge. To mitigate this issue, we propose to let PLMs learn the deterministic relationship between the remaining context and the masked content. The deterministic relationship ensures that the masked factual content can be deterministically inferable based on the existing clues in the context. That would provide more stable patterns for PLMs to capture factual knowledge than randomly masking. Two pre-training tasks are further introduced to motivate PLMs to rely on the deterministic relationship when filling masks. Specifically, we use an external Knowledge Base (KB) to identify deterministic relationships and continuously pre-train PLMs with the proposed methods. The factual knowledge probing experiments indicate that the continuously pre-trained PLMs achieve better robustness in factual knowledge capturing. 
Further experiments on question-answering datasets show that trying to learn a deterministic relationship with the proposed methods can also help other knowledge-intensive tasks.", + "author": "Shaobo Li; Xiaoguang Li; Lifeng Shang; Chengjie Sun; Bingquan Liu; Zhenzhou Ji; Xin Jiang; Qun Liu", + "authorids": "/s/shaobo-li/; /x/xiaoguang-li/; /l/lifeng-shang/; /c/cheng-jie-sun/; /b/bingquan-liu/; /z/zhenzhou-ji/; /x/xin-jiang/; /q/qun-liu/", + "bibtex": "@inproceedings{li-etal-2022-pre-training,\n title = \"Pre-training Language Models with Deterministic Factual Knowledge\",\n author = \"Li, Shaobo and\n Li, Xiaoguang and\n Shang, Lifeng and\n Sun, Chengjie and\n Liu, Bingquan and\n Ji, Zhenzhou and\n Jiang, Xin and\n Liu, Qun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.764/\",\n doi = \"10.18653/v1/2022.emnlp-main.764\",\n pages = \"11118--11131\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.764.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.764/", + "pdf_size": 486394, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5626203283527943087&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Harbin Institute of Technology; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; Huawei Noah\u2019s Ark Lab; Huawei Noah\u2019s Ark Lab", + "aff_domain": "insun.hit.edu.cn;huawei.com;huawei.com;hit.edu.cn;hit.edu.cn;hit.edu.cn;huawei.com;huawei.com", + "email": "insun.hit.edu.cn;huawei.com;huawei.com;hit.edu.cn;hit.edu.cn;hit.edu.cn;huawei.com;huawei.com", + "github": "", + 
"project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;0;0;0;1;1", + "aff_unique_norm": "Harbin Institute of Technology;Huawei", + "aff_unique_dep": ";Noah\u2019s Ark Lab", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.huawei.com", + "aff_unique_abbr": "HIT;Huawei", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.810", + "title": "Pre-training Transformer Models with Sentence-Level Objectives for Answer Sentence Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "An important task for designing QA systems is answer sentence selection (AS2): selecting the sentence containing (or constituting) the answer to a question from a set of retrieved relevant documents. In this paper, we propose three novel sentence-level transformer pre-training objectives that incorporate paragraph-level semantics within and across documents, to improve the performance of transformers for AS2, and mitigate the requirement of large labeled datasets. Specifically, the model is tasked to predict whether: (i) two sentences are extracted from the same paragraph, (ii) a given sentence is extracted from a given paragraph, and (iii) two paragraphs are extracted from the same document. 
Our experiments on three public and one industrial AS2 datasets demonstrate the empirical superiority of our pre-trained transformers over baseline models such as RoBERTa and ELECTRA for AS2.", + "author": "Luca Di Liello; Siddhant Garg; Luca Soldaini; Alessandro Moschitti", + "authorids": "/l/luca-di-liello/; /s/siddhant-garg/; /l/luca-soldaini/; /a/alessandro-moschitti/", + "bibtex": "@inproceedings{di-liello-etal-2022-pre,\n title = \"Pre-training Transformer Models with Sentence-Level Objectives for Answer Sentence Selection\",\n author = \"Di Liello, Luca and\n Garg, Siddhant and\n Soldaini, Luca and\n Moschitti, Alessandro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.810/\",\n doi = \"10.18653/v1/2022.emnlp-main.810\",\n pages = \"11806--11816\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.810.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.810/", + "pdf_size": 225880, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11590180078634319174&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "University of Trento; Amazon Alexa AI; Allen Institute for AI + Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "unitn.it;amazon.com;allenai.org;amazon.com", + "email": "unitn.it;amazon.com;allenai.org;amazon.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2+1;1", + "aff_unique_norm": "University of Trento;Amazon;Allen Institute for AI", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.unitn.it;https://www.amazon.com;https://allenai.org", + "aff_unique_abbr": "UniTN;Amazon;AI2", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;1;1+1;1", + "aff_country_unique": "Italy;United States" + }, + { + "id": "2022.emnlp-main.767", + "title": "PreQuEL: Quality Estimation of Machine Translation Outputs in Advance", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present the task of PreQuEL, Pre-(Quality-Estimation) Learning. A PreQuEL system predicts how well a given sentence will be translated, without recourse to the actual translation, thus eschewing unnecessary resource allocation when translation quality is bound to be low. PreQuEL can be defined relative to a given MT system (e.g., some industry service) or generally relative to the state-of-the-art.From a theoretical perspective, PreQuEL places the focus on the source text, tracing properties, possibly linguistic features, that make a sentence harder to machine translate.We develop a baseline model for the task and analyze its performance. We also develop a data augmentation method (from parallel corpora), that improves results substantially. We show that this augmentation method can improve the performance of the Quality-Estimation task as well.We investigate the properties of the input text that our model is sensitive to, by testing it on challenge sets and different languages. 
We conclude that it is aware of syntactic and semantic distinctions, and correlates and even over-emphasizes the importance of standard NLP features.", + "author": "Shachar Don-Yehiya; Leshem Choshen; Omri Abend", + "authorids": "/s/shachar-don-yehiya/; /l/leshem-choshen/; /o/omri-abend/", + "bibtex": "@inproceedings{don-yehiya-etal-2022-prequel,\n title = \"{P}re{Q}u{EL}: Quality Estimation of Machine Translation Outputs in Advance\",\n author = \"Don-Yehiya, Shachar and\n Choshen, Leshem and\n Abend, Omri\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.767/\",\n doi = \"10.18653/v1/2022.emnlp-main.767\",\n pages = \"11170--11183\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.767.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.767/", + "pdf_size": 353483, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10720450949481396892&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Engineering, The Hebrew University of Jerusalem; School of Computer Science and Engineering, The Hebrew University of Jerusalem; School of Computer Science and Engineering, The Hebrew University of Jerusalem", + "aff_domain": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "email": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "github": "https://github.com/shachardon/PreQuEL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The Hebrew University of Jerusalem", + "aff_unique_dep": "School of Computer Science and Engineering", + "aff_unique_url": "https://www.huji.ac.il", + "aff_unique_abbr": "HUJI", + 
"aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Jerusalem", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.482", + "title": "Precisely the Point: Adversarial Augmentations for Faithful and Informative Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Though model robustness has been extensively studied in language understanding, the robustness of Seq2Seq generation remains understudied.In this paper, we conduct the first quantitative analysis on the robustness of pre-trained Seq2Seq models. We find that even current SOTA pre-trained Seq2Seq model (BART) is still vulnerable, which leads to significant degeneration in faithfulness and informativeness for text generation tasks.This motivated us to further propose a novel adversarial augmentation framework, namely AdvSeq, for generally improving faithfulness and informativeness of Seq2Seq models via enhancing their robustness. AdvSeq automatically constructs two types of adversarial augmentations during training, including implicit adversarial samples by perturbing word representations and explicit adversarial samples by word swapping, both of which effectively improve Seq2Seq robustness.Extensive experiments on three popular text generation tasks demonstrate that AdvSeq significantly improves both the faithfulness and informativeness of Seq2Seq generation under both automatic and human evaluation settings.", + "author": "Wenhao Wu; Wei Li; Jiachen Liu; Xinyan Xiao; Sujian Li; Yajuan Lyu", + "authorids": "/w/wenhao-wu/; /w/wei-li/; /j/jiachen-liu/; /x/xinyan-xiao/; /s/sujian-li/; /y/yajuan-lyu/", + "bibtex": "@inproceedings{wu-etal-2022-precisely,\n title = \"Precisely the Point: Adversarial Augmentations for Faithful and Informative Text Generation\",\n author = \"Wu, Wenhao and\n Li, Wei and\n Liu, Jiachen and\n Xiao, Xinyan and\n Li, Sujian and\n Lyu, Yajuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, 
Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.482/\",\n doi = \"10.18653/v1/2022.emnlp-main.482\",\n pages = \"7160--7176\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.482.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.482/", + "pdf_size": 368648, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1511070935188301937&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff": "Key Laboratory of Computational Linguistics, MOE, Peking University+Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Baidu Inc., Beijing, China; Key Laboratory of Computational Linguistics, MOE, Peking University; Baidu Inc., Beijing, China", + "aff_domain": "pku.edu.cn;baidu.com;baidu.com;baidu.com;pku.edu.cn;baidu.com", + "email": "pku.edu.cn;baidu.com;baidu.com;baidu.com;pku.edu.cn;baidu.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;1;1;0;1", + "aff_unique_norm": "Peking University;Baidu Inc.", + "aff_unique_dep": "Key Laboratory of Computational Linguistics;", + "aff_unique_url": "http://www.pku.edu.cn;https://www.baidu.com", + "aff_unique_abbr": "PKU;Baidu", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.793", + "title": "Predicting Fine-Tuning Performance with Probing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large NLP models have recently shown impressive performance in language understanding tasks, typically evaluated by their fine-tuned performance. 
Alternatively, probing has received increasing attention as being a lightweight method for interpreting the intrinsic mechanisms of large NLP models. In probing, post-hoc classifiers are trained on \u201cout-of-domain\u201d datasets that diagnose specific abilities. While probing the language models has led to insightful findings, they appear disjointed from the development of models. This paper explores the utility of probing deep NLP models to extract a proxy signal widely used in model development \u2013 the fine-tuning performance. We find that it is possible to use the accuracies of only three probing tests to predict the fine-tuning performance with errors 40% - 80% smaller than baselines. We further discuss possible avenues where probing can empower the development of deep NLP models.", + "author": "Zining Zhu; Soroosh Shahtalebi; Frank Rudzicz", + "authorids": "/z/zining-zhu/; /s/soroosh-shahtalebi/; /f/frank-rudzicz/", + "bibtex": "@inproceedings{zhu-etal-2022-predicting,\n title = \"Predicting Fine-Tuning Performance with Probing\",\n author = \"Zhu, Zining and\n Shahtalebi, Soroosh and\n Rudzicz, Frank\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.793/\",\n doi = \"10.18653/v1/2022.emnlp-main.793\",\n pages = \"11534--11547\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.793.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.793/", + "pdf_size": 391528, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12863184631448388225&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Toronto+Vector Institute for Artificial Intelligence; Vector Institute for 
Artificial Intelligence; University of Toronto+Vector Institute for Artificial Intelligence+Unity Health Toronto", + "aff_domain": "cs.toronto.edu;vectorinstitute.ai;spoclab.com", + "email": "cs.toronto.edu;vectorinstitute.ai;spoclab.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0+1+2", + "aff_unique_norm": "University of Toronto;Vector Institute for Artificial Intelligence;Unity Health Toronto", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.utoronto.ca;https://vectorinstitute.ai/;https://www.unityhealth.to", + "aff_unique_abbr": "U of T;Vector Institute;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0+0+0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.findings-emnlp.418", + "title": "Predicting Long-Term Citations from Short-Term Linguistic Influence", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A standard measure of the influence of a research paper is the number of times it is cited. However, papers may be cited for many reasons, and citation count is not informative about the extent to which a paper affected the content of subsequent publications. We therefore propose a novel method to quantify linguistic influence in timestamped document collections. There are two main steps: first, identify lexical and semantic changes using contextual embeddings and word frequencies; second, aggregate information about these changes into per-document influence parameters by estimating a high-dimensional Hawkes process with a low-rank parameter matrix. The resulting measures of linguistic influence are predictive of future citations. Specifically, the estimate of linguistic influence from the two years after a paper\u2019s publication is correlated with and predictive of its citation count in the following three years. 
This is demonstrated using an online evaluation with incremental temporal training/test splits, in comparison with a strong baseline that includes predictors for initial citation counts, topics, and lexical features.", + "author": "Sandeep Soni; David Bamman; Jacob Eisenstein", + "authorids": "/s/sandeep-soni/; /d/david-bamman/; /j/jacob-eisenstein/", + "bibtex": "@inproceedings{soni-etal-2022-predicting,\n title = \"Predicting Long-Term Citations from Short-Term Linguistic Influence\",\n author = \"Soni, Sandeep and\n Bamman, David and\n Eisenstein, Jacob\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.418/\",\n doi = \"10.18653/v1/2022.findings-emnlp.418\",\n pages = \"5700--5716\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.418.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.418/", + "pdf_size": 463893, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17266351937237055318&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of California, Berkeley; University of California, Berkeley; Google Research", + "aff_domain": "berkeley.edu;berkeley.edu;google.com", + "email": "berkeley.edu;berkeley.edu;google.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "University of California, Berkeley;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.berkeley.edu;https://research.google", + "aff_unique_abbr": "UC Berkeley;Google Research", + "aff_campus_unique_index": "0;0;1", + "aff_campus_unique": "Berkeley;Mountain View", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": 
"United States" + }, + { + "id": "2022.emnlp-main.585", + "title": "Predicting Prerequisite Relations for Unseen Concepts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Concept prerequisite learning (CPL) plays a key role in developing technologies that assist people to learn a new complex topic or concept. Previous work commonly assumes that all concepts are given at training time and solely focuses on predicting the unseen prerequisite relationships between them. However, many real-world scenarios deal with concepts that are left undiscovered at training time, which is relatively unexplored. This paper studies this problem and proposes a novel alternating knowledge distillation approach to take advantage of both content- and graph-based models for this task. Extensive experiments on three public benchmarks demonstrate up to 10% improvements in terms of F1 score.", + "author": "Yaxin Zhu; Hamed Zamani", + "authorids": "/y/yaxin-zhu/; /h/hamed-zamani/", + "bibtex": "@inproceedings{zhu-zamani-2022-predicting,\n title = \"Predicting Prerequisite Relations for Unseen Concepts\",\n author = \"Zhu, Yaxin and\n Zamani, Hamed\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.585/\",\n doi = \"10.18653/v1/2022.emnlp-main.585\",\n pages = \"8542--8548\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.585.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.585/", + "pdf_size": 538581, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9592716481816105681&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Center for Intelligent Information Retrieval, University of 
Massachusetts Amherst; Center for Intelligent Information Retrieval, University of Massachusetts Amherst", + "aff_domain": "cs.umass.edu;cs.umass.edu", + "email": "cs.umass.edu;cs.umass.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "Center for Intelligent Information Retrieval", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.269", + "title": "ProGen: Progressive Zero-shot Dataset Generation via In-context Feedback", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, dataset-generation-based zero-shot learning has shown promising results by training a task-specific model with a dataset synthesized from large pre-trained language models (PLMs). The final task-specific model often achieves compatible or even better performance than PLMs under the zero-shot setting, with orders of magnitude fewer parameters.However, synthetic datasets have their drawbacks. They have long being suffering from the low-quality issue (e.g., low informativeness, redundancy). This explains why the massive synthetic data does not lead to better performance \u2013 a scenario we would expect in the human-labeled data. To improve the quality in dataset synthesis, we propose a progressive zero-shot dataset generation framework, ProGen, which leverages the feedback from the task-specific model to guide the generation of new training data via in-context examples.Extensive experiments on five text classification datasets demonstrate the effectiveness of the proposed approach. 
We also show ProGen achieves on-par or superior performance with only 1% synthetic dataset size, when comparing to baseline methods without in-context feedback.", + "author": "Jiacheng Ye; Jiahui Gao; Zhiyong Wu; Jiangtao Feng; Tao Yu; Lingpeng Kong", + "authorids": "/j/jiacheng-ye/; /j/jiahui-gao/; /z/zhiyong-wu/; /j/jiangtao-feng/; /t/tao-yu/; /l/lingpeng-kong/", + "bibtex": "@inproceedings{ye-etal-2022-progen,\n title = \"{P}ro{G}en: Progressive Zero-shot Dataset Generation via In-context Feedback\",\n author = \"Ye, Jiacheng and\n Gao, Jiahui and\n Wu, Zhiyong and\n Feng, Jiangtao and\n Yu, Tao and\n Kong, Lingpeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.269/\",\n doi = \"10.18653/v1/2022.findings-emnlp.269\",\n pages = \"3671--3683\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.269.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.269/", + "pdf_size": 584020, + "gs_citation": 69, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11020744800265558778&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Shanghai AI Laboratory + The University of Hong Kong; The University of Hong Kong; Shanghai AI Laboratory; Shanghai AI Laboratory; The University of Hong Kong + University of Washington; The University of Hong Kong + Shanghai AI Laboratory", + "aff_domain": "connect.hku.hk;connect.hku.hk;pjlab.org.cn;pjlab.org.cn;cs.hku.hk;cs.hku.hk", + "email": "connect.hku.hk;connect.hku.hk;pjlab.org.cn;pjlab.org.cn;cs.hku.hk;cs.hku.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;0;0;1+2;1+0", + "aff_unique_norm": "Shanghai AI Laboratory;The University of Hong 
Kong;University of Washington", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.shanghai-ai-lab.com;https://www.hku.hk;https://www.washington.edu", + "aff_unique_abbr": "SAIL;HKU;UW", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0+1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.421", + "title": "Probing Cross-modal Semantics Alignment Capability from the Textual Perspective", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In recent years, vision and language pre-training (VLP) models have advanced the state-of-the-art results in a variety of cross-modal downstream tasks. Aligning cross-modal semantics is claimed to be one of the essential capabilities of VLP models. However, it still remains unclear about the inner working mechanism of alignment in VLP models. In this paper, we propose a new probing method that is based on image captioning to first empirically study the cross-modal semantics alignment of VLP models. Our probing method is built upon the fact that given an image-caption pair, the VLP models will give a score, indicating how well two modalities are aligned; maximizing such scores will generate sentences that VLP models believe are of good alignment. Analyzing these sentences thus will reveal in what way different modalities are aligned and how well these alignments are in VLP models. We apply our probing method to five popular VLP models, including UNITER, ROSITA, ViLBERT, CLIP, and LXMERT, and provide a comprehensive analysis of the generated captions guided by these models. Our results show that VLP models (1) focus more on just aligning objects with visual words, while neglecting global semantics; (2) prefer fixed sentence patterns, thus ignoring more important textual information including fluency and grammar; and (3) deem the captions with more visual words are better aligned with images. 
These findings indicate that VLP models still have weaknesses in cross-modal semantics alignment and we hope this work will draw researchers\u2019 attention to such problems when designing a new VLP model.", + "author": "Zheng Ma; Shi Zong; Mianzhi Pan; Jianbing Zhang; Shujian Huang; Xinyu Dai; Jiajun Chen", + "authorids": "/z/zheng-ma/; /s/shi-zong/; /m/mianzhi-pan/; /j/jianbing-zhang/; /s/shujian-huang/; /x/xinyu-dai/; /j/jiajun-chen/", + "bibtex": "@inproceedings{ma-etal-2022-probing,\n title = \"Probing Cross-modal Semantics Alignment Capability from the Textual Perspective\",\n author = \"Ma, Zheng and\n Zong, Shi and\n Pan, Mianzhi and\n Zhang, Jianbing and\n Huang, Shujian and\n Dai, Xinyu and\n Chen, Jiajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.421/\",\n doi = \"10.18653/v1/2022.findings-emnlp.421\",\n pages = \"5739--5749\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.421.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.421/", + "pdf_size": 639432, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6112958552325546524&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Nanjing University; Nanjing University; Nanjing University; Nanjing University; Nanjing University; Nanjing University; Nanjing University", + "aff_domain": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "https://github.com/aaronma2020/probing_vlp", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + 
"aff_unique_norm": "Nanjing University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nju.edu.cn", + "aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.289", + "title": "Probing Relational Knowledge in Language Models via Word Analogies", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Understanding relational knowledge plays an integral part in natural language comprehension. When it comes to pre-trained language models (PLM), prior work has been focusing on probing relational knowledge this by filling the blanks in pre-defined prompts such as \u201cThe capital of France is \u2014\". However, these probes may be affected by the co-occurrence of target relation words and entities (e.g. \u201ccapital\u201d, \u201cFrance\u201d and \u201cParis\u201d) in the pre-training corpus. In this work, we extend these probing methodologies leveraging analogical proportions as a proxy to probe relational knowledge in transformer-based PLMs without directly presenting the desired relation. In particular, we analysed the ability of PLMs to understand (1) the directionality of a given relation (e.g. Paris-France is not the same as France-Paris); (2) the ability to distinguish types on a given relation (both France and Japan are countries); and (3) the relation itself (Paris is the capital of France, but not Rome). Our results show how PLMs are extremely accurate at (1) and (2), but have clear room for improvement for (3). 
To better understand the reasons behind this behaviour and mistakes made by PLMs, we provide an extended quantitative analysis based on relevant factors such as frequency.", + "author": "Kiamehr Rezaee; Jose Camacho-Collados", + "authorids": "/k/kiamehr-rezaee/; /j/jose-camacho-collados/", + "bibtex": "@inproceedings{rezaee-camacho-collados-2022-probing,\n title = \"Probing Relational Knowledge in Language Models via Word Analogies\",\n author = \"Rezaee, Kiamehr and\n Camacho-Collados, Jose\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.289/\",\n doi = \"10.18653/v1/2022.findings-emnlp.289\",\n pages = \"3930--3936\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.289.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.289/", + "pdf_size": 221939, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1792943073961152232&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "Cardiff NLP, School of Computer Science and Informatics, Cardiff University, United Kingdom; Cardiff NLP, School of Computer Science and Informatics, Cardiff University, United Kingdom", + "aff_domain": "cardiff.ac.uk;cardiff.ac.uk", + "email": "cardiff.ac.uk;cardiff.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Cardiff University", + "aff_unique_dep": "School of Computer Science and Informatics", + "aff_unique_url": "https://www.cardiff.ac.uk", + "aff_unique_abbr": "Cardiff", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cardiff", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": 
"2022.findings-emnlp.264", + "title": "Probing Structural Knowledge from Pre-trained Language Model for Argumentation Relation Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Extracting fine-grained structural information between argumentation component (AC) pairs is essential for argumentation relation classification (ARC). However, most previous studies attempt to model the relationship between AC pairs using AC level similarity or semantically relevant features. They ignore the complex interaction between AC pairs and cannot effectively reason the argumentation relation deeply. Therefore, in this paper, we propose a novel dual prior graph neural network (DPGNN) to jointly explore the probing knowledge derived from pre-trained language models (PLMs) and the syntactical information for comprehensively modeling the relationship between AC pairs. Specifically, we construct a probing graph by using probing knowledge derived from PLMs to recognize and align the relational information within and across the argumentation components. In addition, we propose a mutual dependency graph for the AC pair to reason the fine-grained syntactic structural information, in which the syntactical correlation between words is set by the dependency information within AC and mutual attention mechanism across ACs. The knowledge learned from the probing graph and the dependency graph are combined to comprehensively capture the aligned relationships of AC pairs for improving the results of ARC. 
Experimental results on three public datasets show that DPGNN outperforms the state-of-the-art baselines by a noticeable margin.", + "author": "Yang Sun; Bin Liang; Jianzhu Bao; Min Yang; Ruifeng Xu", + "authorids": "/y/yang-sun/; /b/bin-liang/; /j/jianzhu-bao/; /m/min-yang/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{sun-etal-2022-probing,\n title = \"Probing Structural Knowledge from Pre-trained Language Model for Argumentation Relation Classification\",\n author = \"Sun, Yang and\n Liang, Bin and\n Bao, Jianzhu and\n Yang, Min and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.264/\",\n doi = \"10.18653/v1/2022.findings-emnlp.264\",\n pages = \"3605--3615\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.264.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.264/", + "pdf_size": 511020, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17714800524493577109&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Harbin Institute of Technology, Shenzhen, China + Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Institute of Technology, Shenzhen, China + Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Institute of Technology, Shenzhen, China + Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; SIAT, Chinese Academy of Sciences, Shenzhen, China; Harbin Institute of Technology, Shenzhen, China + Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies + Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": 
"mail.ustc.edu.cn;stu.hit.edu.cn;gmail.com;siat.ac.cn;hit.edu.cn", + "email": "mail.ustc.edu.cn;stu.hit.edu.cn;gmail.com;siat.ac.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;0+1+3", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Shenzhen Institute of Advanced Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;", + "aff_unique_url": "http://en.hit.edu.cn/;;http://www.siat.ac.cn;", + "aff_unique_abbr": "HIT;;SIAT;", + "aff_campus_unique_index": "0;0;0;0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.502", + "title": "Probing for Constituency Structure in Neural Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we investigate to which extent contextual neural language models (LMs) implicitly learn syntactic structure. More concretely, we focus on constituent structure as represented in the Penn Treebank (PTB). Using standard probing techniques based on diagnostic classifiers, we assess the accuracy of representing constituents of different categories within the neuron activations of a LM such as RoBERTa. In order to make sure that our probe focuses on syntactic knowledge and not on implicit semantic generalizations, we also experiment on a PTB version that is obtained by randomly replacing constituents with each other while keeping syntactic structure, i.e., a semantically ill-formed but syntactically well-formed version of the PTB. 
We find that 4 pretrained transformer LMs obtain high performance on our probing tasks even on manipulated data, suggesting that semantic and syntactic knowledge in their representations can be separated and that constituency information is in fact learned by the LM. Moreover, we show that a complete constituency tree can be linearly separated from LM representations.", + "author": "David Arps; Younes Samih; Laura Kallmeyer; Hassan Sajjad", + "authorids": "/d/david-arps/; /y/younes-samih/; /l/laura-kallmeyer/; /h/hassan-sajjad/", + "bibtex": "@inproceedings{arps-etal-2022-probing,\n title = \"Probing for Constituency Structure in Neural Language Models\",\n author = \"Arps, David and\n Samih, Younes and\n Kallmeyer, Laura and\n Sajjad, Hassan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.502/\",\n doi = \"10.18653/v1/2022.findings-emnlp.502\",\n pages = \"6738--6757\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.502.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.502/", + "pdf_size": 371113, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11064538434615591151&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Heinrich Heine University D\u00fcsseldorf, Germany; Heinrich Heine University D\u00fcsseldorf, Germany; Heinrich Heine University D\u00fcsseldorf, Germany; Faculty of Computer Science, Dalhousie University, Canada", + "aff_domain": "hhu.de;hhu.de;hhu.de;dal.ca", + "email": "hhu.de;hhu.de;hhu.de;dal.ca", + "github": "https://github.com/davidarps/constptbprobing", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Heinrich Heine 
University;Dalhousie University", + "aff_unique_dep": ";Faculty of Computer Science", + "aff_unique_url": "https://www.hhu.de;https://www.dal.ca", + "aff_unique_abbr": "HHU;Dal", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "D\u00fcsseldorf;", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "Germany;Canada" + }, + { + "id": "2022.findings-emnlp.203", + "title": "Probing for Incremental Parse States in Autoregressive Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Next-word predictions from autoregressive neural language models show remarkable sensitivity to syntax. This work evaluates the extent to which this behavior arises as a result of a learned ability to maintain implicit representations of incremental syntactic structures. We extend work in syntactic probing to the incremental setting and present several probes for extracting incomplete syntactic structure (operationalized through parse states from a stack-based parser) from autoregressive language models. We find that our probes can be used to predict model preferences on ambiguous sentence prefixes and causally intervene on model representations and steer model behavior. 
This suggests implicit incremental syntactic inferences underlie next-word predictions in autoregressive neural language models.", + "author": "Tiwalayo Eisape; Vineet Gangireddy; Roger Levy; Yoon Kim", + "authorids": "/t/tiwalayo-eisape/; /v/vineet-gangireddy/; /r/roger-levy/; /y/yoon-kim/", + "bibtex": "@inproceedings{eisape-etal-2022-probing,\n title = \"Probing for Incremental Parse States in Autoregressive Language Models\",\n author = \"Eisape, Tiwalayo and\n Gangireddy, Vineet and\n Levy, Roger and\n Kim, Yoon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.203/\",\n doi = \"10.18653/v1/2022.findings-emnlp.203\",\n pages = \"2801--2813\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.203.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.203/", + "pdf_size": 3812944, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4363146077318133153&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "MIT; Harvard University; MIT; MIT", + "aff_domain": "mit.edu;college.harvard.edu;mit.edu;mit.edu", + "email": "mit.edu;college.harvard.edu;mit.edu;mit.edu", + "github": "https://github.com/eisape/incremental_parse_probe", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Harvard University", + "aff_unique_dep": ";", + "aff_unique_url": "https://web.mit.edu;https://www.harvard.edu", + "aff_unique_abbr": "MIT;Harvard", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.82", + "title": 
"Progressive Sentiment Analysis for Code-Switched Text Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multilingual transformer language models have recently attracted much attention from researchers and are used in cross-lingual transfer learning for many NLP tasks such as text classification and named entity recognition. However, similar methods for transfer learning from monolingual text to code-switched text have not been extensively explored mainly due to the following challenges: (1) Code-switched corpus, unlike monolingual corpus, consists of more than one language and existing methods can\u2019t be applied efficiently, (2) Code-switched corpus is usually made of resource-rich and low-resource languages and upon using multilingual pre-trained language models, the final model might bias towards resource-rich language. In this paper, we focus on code-switched sentiment analysis where we have a labelled resource-rich language dataset and unlabelled code-switched data. We propose a framework that takes the distinction between resource-rich and low-resource language into account. Instead of training on the entire code-switched corpus at once, we create buckets based on the fraction of words in the resource-rich language and progressively train from resource-rich language dominated samples to low-resource language dominated samples. 
Extensive experiments across multiple language pairs demonstrate that progressive training helps low-resource language dominated samples.", + "author": "Sudhanshu Ranjan; Dheeraj Mekala; Jingbo Shang", + "authorids": "/s/sudhanshu-ranjan/; /d/dheeraj-mekala/; /j/jingbo-shang/", + "bibtex": "@inproceedings{ranjan-etal-2022-progressive,\n title = \"Progressive Sentiment Analysis for Code-Switched Text Data\",\n author = \"Ranjan, Sudhanshu and\n Mekala, Dheeraj and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.82/\",\n doi = \"10.18653/v1/2022.findings-emnlp.82\",\n pages = \"1155--1167\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.82.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.82/", + "pdf_size": 1554132, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6863062663976182064&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "University of California San Diego; University of California San Diego; University of California San Diego+Hal\u0131c\u0131o\u011flu Data Science Institute, University of California San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "University of California, San Diego;University of California San Diego", + "aff_unique_dep": ";Hal\u0131c\u0131o\u011flu Data Science Institute", + "aff_unique_url": "https://ucsd.edu;https://ucsd.edu", + "aff_unique_abbr": "UCSD;UCSD", + "aff_campus_unique_index": "0;0;0+0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0+0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.412", + "title": "Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We explore the idea of compressing the prompts used to condition language models, and show that compressed prompts can retain a substantive amount of information about the original prompt. For severely compressed prompts, while fine-grained information is lost, abstract information and general sentiments can be retained with surprisingly few parameters, which can be useful in the context of decode-time algorithms for controllability and toxicity reduction. We find that some complex prompts can be effectively compressed into a single token to guide generation. We also show that compressed prompts are largely compositional, and can be constructed such that they can be used to control independent aspects of generated text.", + "author": "David Wingate; Mohammad Shoeybi; Taylor Sorensen", + "authorids": "/d/david-wingate/; /m/mohammad-shoeybi/; /t/taylor-sorensen/", + "bibtex": "@inproceedings{wingate-etal-2022-prompt,\n title = \"Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models\",\n author = \"Wingate, David and\n Shoeybi, Mohammad and\n Sorensen, Taylor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.412/\",\n doi = \"10.18653/v1/2022.findings-emnlp.412\",\n pages = \"5621--5634\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.412.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.412/", + 
"pdf_size": 1193804, + "gs_citation": 78, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4319444445048085507&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Brigham Young University\u2217; Nvidia, Inc.; University of Washington\u2020", + "aff_domain": "cs.byu.edu;nvidia.com;cs.washington.edu", + "email": "cs.byu.edu;nvidia.com;cs.washington.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Brigham Young University;NVIDIA Corporation;University of Washington", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.byu.edu;https://www.nvidia.com;https://www.washington.edu", + "aff_unique_abbr": "BYU;NVIDIA;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.766", + "title": "Prompt Conditioned VAE: Enhancing Generative Replay for Lifelong Learning in Task-Oriented Dialogue", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Lifelong learning (LL) is vital for advanced task-oriented dialogue (ToD) systems. To address the catastrophic forgetting issue of LL, generative replay methods are widely employed to consolidate past knowledge with generated pseudo samples. However, most existing generative replay methods use only a single task-specific token to control their models. This scheme is usually not strong enough to constrain the generative model due to insufficient information involved. In this paper, we propose a novel method, prompt conditioned VAE for lifelong learning (PCLL), to enhance generative replay by incorporating tasks\u2019 statistics. PCLL captures task-specific distributions with a conditional variational autoencoder, conditioned on natural language prompts to guide the pseudo-sample generation. 
Moreover, it leverages a distillation process to further consolidate past knowledge by alleviating the noise in pseudo samples. Experiments on natural language understanding tasks of ToD systems demonstrate that PCLL significantly outperforms competitive baselines in building lifelong learning models.", + "author": "Yingxiu Zhao; Yinhe Zheng; Zhiliang Tian; Chang Gao; Jian Sun; Nevin L. Zhang", + "authorids": "/y/yingxiu-zhao/; /y/yinhe-zheng/; /z/zhiliang-tian/; /c/chang-gao/; /j/jian-sun/; /n/nevin-l-zhang/", + "bibtex": "@inproceedings{zhao-etal-2022-prompt,\n title = \"Prompt Conditioned {VAE}: Enhancing Generative Replay for Lifelong Learning in Task-Oriented Dialogue\",\n author = \"Zhao, Yingxiu and\n Zheng, Yinhe and\n Tian, Zhiliang and\n Gao, Chang and\n Sun, Jian and\n Zhang, Nevin L.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.766/\",\n doi = \"10.18653/v1/2022.emnlp-main.766\",\n pages = \"11153--11169\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.766.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.766/", + "pdf_size": 801520, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16358302113526725483&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "The Hong Kong University of Science and Technology, Hong Kong; Alibaba Group, China; The Hong Kong University of Science and Technology, Hong Kong; The Chinese University of Hong Kong, Hong Kong; Alibaba Group, China; The Hong Kong University of Science and Technology, Hong Kong", + "aff_domain": "connect.ust.hk;connect.ust.hk;connect.ust.hk;163.com;se.cuhk.edu.hk;hotmail.com", + "email": 
"connect.ust.hk;connect.ust.hk;connect.ust.hk;163.com;se.cuhk.edu.hk;hotmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;2;1;0", + "aff_unique_norm": "Hong Kong University of Science and Technology;Alibaba Group;The Chinese University of Hong Kong", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ust.hk;https://www.alibaba.com;https://www.cuhk.edu.hk", + "aff_unique_abbr": "HKUST;Alibaba;CUHK", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Hong Kong;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.192", + "title": "Prompt Consistency for Zero-Shot Task Generalization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "One of the most impressive results of recent NLP history is the ability of pre-trained language models to solve new tasks in a zero-shot setting. To achieve this, NLP tasks are framed as natural language prompts, generating a response indicating the predicted output. Nonetheless, the performance in such settings often lags far behind its supervised counterpart, suggesting a large space for potential improvement. In this paper, we explore methods to utilize unlabeled data to improve zero-shot performance. Specifically, we take advantage of the fact that multiple prompts can be used to specify a single task, and propose to regularize prompt consistency, encouraging consistent predictions over this diverse set of prompts. Our method makes it possible to fine-tune the model either with extra unlabeled training data, or directly on test input at inference time in an unsupervised manner. In experiments, our approach outperforms the state-of-the-art zero-shot learner, T0, on 9 out of 11 datasets across 4 NLP tasks by up to 10.6 absolute points in terms of accuracy. 
The gains are often attained with a small number of unlabeled examples.", + "author": "Chunting Zhou; Junxian He; Xuezhe Ma; Taylor Berg-Kirkpatrick; Graham Neubig", + "authorids": "/c/chunting-zhou/; /j/junxian-he/; /x/xuezhe-ma/; /t/taylor-berg-kirkpatrick/; /g/graham-neubig/", + "bibtex": "@inproceedings{zhou-etal-2022-prompt,\n title = \"Prompt Consistency for Zero-Shot Task Generalization\",\n author = \"Zhou, Chunting and\n He, Junxian and\n Ma, Xuezhe and\n Berg-Kirkpatrick, Taylor and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.192/\",\n doi = \"10.18653/v1/2022.findings-emnlp.192\",\n pages = \"2613--2626\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.192.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.192/", + "pdf_size": 979613, + "gs_citation": 80, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15742074074841379344&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Information Sciences Institute, University of Southern California; Department of Computer Science and Engineering, University of California San Diego; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cs.cmu.edu;isi.edu;eng.ucsd.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;isi.edu;eng.ucsd.edu;cs.cmu.edu", + "github": "https://github.com/violet-zct/swarm-distillation-zero-shot", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Carnegie Mellon University;University of Southern 
California;University of California, San Diego", + "aff_unique_dep": "Language Technologies Institute;Information Sciences Institute;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.cmu.edu;https://www.usc.edu;https://www.ucsd.edu", + "aff_unique_abbr": "CMU;USC;UCSD", + "aff_campus_unique_index": "0;0;1;2;0", + "aff_campus_unique": "Pittsburgh;Los Angeles;San Diego", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.87", + "title": "Prompt-Based Meta-Learning For Few-shot Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few-shot Text Classification predicts the semantic label of a given text with a handful of supporting instances. Current meta-learning methods have achieved satisfying results in various few-shot situations. Still, they often require a large amount of data to construct many few-shot tasks for meta-training, which is not practical in real-world few-shot scenarios. Prompt-tuning has recently proved to be another effective few-shot learner by bridging the gap between pre-train and downstream tasks. In this work, we closely combine the two promising few-shot learning methodologies in structure and propose a Prompt-Based Meta-Learning (PBML) model to overcome the above meta-learning problem by adding the prompting mechanism. PBML assigns label word learning to base-learners and template learning to meta-learner, respectively. Experimental results show state-of-the-art performance on four text classification datasets under few-shot settings, with higher accuracy and good robustness. We demonstrate through low-resource experiments that our method alleviates the shortcoming that meta-learning requires too much data for meta-training. In the end, we use the visualization to interpret and verify that the meta-learning framework can help the prompting method converge better. 
We release our code to reproduce our experiments.", + "author": "Haoxing Zhang; Xiaofeng Zhang; Haibo Huang; Lei Yu", + "authorids": "/h/haoxing-zhang/; /x/xiaofeng-zhang/; /h/haibo-huang/; /l/lei-yu/", + "bibtex": "@inproceedings{zhang-etal-2022-prompt-based,\n title = \"Prompt-Based Meta-Learning For Few-shot Text Classification\",\n author = \"Zhang, Haoxing and\n Zhang, Xiaofeng and\n Huang, Haibo and\n Yu, Lei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.87/\",\n doi = \"10.18653/v1/2022.emnlp-main.87\",\n pages = \"1342--1357\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.87.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.87/", + "pdf_size": 1201995, + "gs_citation": 49, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14536806028079441652&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Sino-French Engineer School, Beihang University, Beijing, China; Sino-French Engineer School, Beihang University, Beijing, China; Sino-French Engineer School, Beihang University, Beijing, China; Sino-French Engineer School, Beihang University, Beijing, China", + "aff_domain": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "email": "buaa.edu.cn;buaa.edu.cn;buaa.edu.cn;buaa.edu.cn", + "github": "https://github.com/MGHZHANG/PBML", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Beihang University", + "aff_unique_dep": "Sino-French Engineer School", + "aff_unique_url": "http://www.buaa.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.401", + "title": "Prompt-Tuning Can Be Much Better Than Fine-Tuning on Cross-lingual Understanding With Multilingual Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained multilingual language models show significant performance gains for zero-shot cross-lingual model transfer on a wide range of natural language understanding (NLU) tasks. Previously, for zero-shot cross-lingual evaluation, pre-trained models are only fine-tuned on English data and tested on a variety of target languages. In this paper, we do cross-lingual evaluation on various NLU tasks (sentence classification, sequence labeling, question answering) using prompt-tuning and compare it with fine-tuning. The results show that prompt tuning achieves much better cross-lingual transfer than fine-tuning across datasets, with only 0.1% to 0.3% tuned parameters. Additionally, we demonstrate through the analysis that prompt tuning can have better cross-lingual transfer-ability of representations on downstream tasks with better aligned decision boundaries.", + "author": "Lifu Tu; Caiming Xiong; Yingbo Zhou", + "authorids": "/l/lifu-tu/; /c/caiming-xiong/; /y/yingbo-zhou/", + "bibtex": "@inproceedings{tu-etal-2022-prompt,\n title = \"Prompt-Tuning Can Be Much Better Than Fine-Tuning on Cross-lingual Understanding With Multilingual Language Models\",\n author = \"Tu, Lifu and\n Xiong, Caiming and\n Zhou, Yingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.401/\",\n doi = \"10.18653/v1/2022.findings-emnlp.401\",\n pages = \"5478--5485\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.401.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.401/", + "pdf_size": 2289851, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15906984413771833421&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": "Salesforce AI Research; Salesforce AI Research; Salesforce AI Research", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Salesforce", + "aff_unique_dep": "Salesforce AI Research", + "aff_unique_url": "https://www.salesforce.com", + "aff_unique_abbr": "Salesforce AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.141", + "title": "Prompt-and-Rerank: A Method for Zero-Shot and Few-Shot Arbitrary Textual Style Transfer with Small Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a method for arbitrary textual style transfer (TST)\u2014the task of transforming a text into any given style\u2014utilizing general-purpose pre-trained language models. Our method, Prompt-and-Rerank, is based on a mathematical formulation of the TST task, decomposing it into three constituent components: textual similarity, target style strength, and fluency. Our method uses zero-shot or few-shot prompting to obtain a set of candidate generations in the target style, and then re-ranks them according to the three components. Our method enables small pre-trained language models to perform on par with state-of-the-art large-scale models while using two orders of magnitude less compute and memory. 
We also investigate the effect of model size and prompt design (e.g., prompt paraphrasing and delimiter-pair choice) on style transfer quality across seven diverse textual style transfer datasets, finding, among other things, that delimiter-pair choice has a large impact on performance, and that models have biases on the direction of style transfer.", + "author": "Mirac Suzgun; Luke Melas-Kyriazi; Dan Jurafsky", + "authorids": "/m/mirac-suzgun/; /l/luke-melas-kyriazi/; /d/dan-jurafsky/", + "bibtex": "@inproceedings{suzgun-etal-2022-prompt,\n title = \"Prompt-and-Rerank: A Method for Zero-Shot and Few-Shot Arbitrary Textual Style Transfer with Small Language Models\",\n author = \"Suzgun, Mirac and\n Melas-Kyriazi, Luke and\n Jurafsky, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.141/\",\n doi = \"10.18653/v1/2022.emnlp-main.141\",\n pages = \"2195--2222\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.141.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.141/", + "pdf_size": 532292, + "gs_citation": 68, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6208521303248309779&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Stanford University; Oxford University; Stanford University", + "aff_domain": "cs.stanford.edu;robots.ox.ac.uk;cs.stanford.edu", + "email": "cs.stanford.edu;robots.ox.ac.uk;cs.stanford.edu", + "github": "https://github.com/suzgunmirac/prompt-and-rerank", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Stanford University;University of Oxford", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.stanford.edu;https://www.ox.ac.uk", + "aff_unique_abbr": "Stanford;Oxford", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "2022.findings-emnlp.282", + "title": "Prompt-based Connective Prediction Method for Fine-grained Implicit Discourse Relation Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Due to the absence of connectives, implicit discourse relation recognition (IDRR) is still a challenging and crucial task in discourse analysis. Most of the current work adopted multitask learning to aid IDRR through explicit discourse relation recognition (EDRR) or utilized dependencies between discourse relation labels to constrain model predictions. But these methods still performed poorly on fine-grained IDRR and even utterly misidentified on most of the few-shot discourse relation classes. To address these problems, we propose a novel Prompt-based Connective Prediction (PCP) method for IDRR. Our method instructs large-scale pre-trained models to use knowledge relevant to discourse relation and utilizes the strong correlation between connectives and discourse relation to help the model recognize implicit discourse relations. Experimental results show that our method surpasses the current state-of-the-art model and achieves significant improvements on those fine-grained few-shot discourse relation. Moreover, our approach is able to be transferred to EDRR and obtain acceptable results. 
Our code is released in https://github.com/zh-i9/PCP-for-IDRR.", + "author": "Hao Zhou; Man Lan; Yuanbin Wu; Yuefeng Chen; Meirong Ma", + "authorids": "/h/hao-zhou/; /m/man-lan/; /y/yuanbin-wu/; /y/yuefeng-chen/; /m/meirong-ma/", + "bibtex": "@inproceedings{zhou-etal-2022-prompt-based,\n title = \"Prompt-based Connective Prediction Method for Fine-grained Implicit Discourse Relation Recognition\",\n author = \"Zhou, Hao and\n Lan, Man and\n Wu, Yuanbin and\n Chen, Yuefeng and\n Ma, Meirong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.282/\",\n doi = \"10.18653/v1/2022.findings-emnlp.282\",\n pages = \"3848--3858\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.282.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.282/", + "pdf_size": 497328, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2384029549696801857&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science and Technology, East China Normal University, Shanghai, China+Shanghai Institute of AI for Education, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China+Shanghai Institute of AI for Education, East China Normal University, Shanghai, China; School of Computer Science and Technology, East China Normal University, Shanghai, China; Shanghai Transsion Co., Ltd., Shanghai, China; Shanghai Transsion Co., Ltd., Shanghai, China", + "aff_domain": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;transsion.com;transsion.com", + "email": "stu.ecnu.edu.cn;cs.ecnu.edu.cn;cs.ecnu.edu.cn;transsion.com;transsion.com", + "github": 
"https://github.com/zh-i9/PCP-for-IDRR", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0;0+0;0;1;1", + "aff_unique_norm": "East China Normal University;Shanghai Transsion Co., Ltd.", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://www.ecnu.edu.cn;", + "aff_unique_abbr": "ECNU;", + "aff_campus_unique_index": "0+0;0+0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.690", + "title": "Prompt-based Distribution Alignment for Domain Generalization in Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt-based learning (a.k.a. prompting) achieves high performance by bridging the gap between the objectives of language modeling and downstream tasks. Domain generalization ability can be improved by prompting since classification across different domains can be unified into the prediction of the same set of label words. The remaining challenge for domain generalization by prompting comes from discrepancies between the data distribution of different domains. To improve domain generalization with prompting, we learn distributional invariance across source domains via two alignment regularization loss functions. The first is vocabulary distribution alignment, which uses a Kullback-Leibler divergence regularization on source-domain vocabulary distributions. The second is feature distribution alignment, which uses a novel adversarial training strategy to learn domain invariant representation across source domains. 
Experiments on sentiment analysis and natural language inference show the effectiveness of our method and achieve state-of-the-art results on six datasets.", + "author": "Chen Jia; Yue Zhang", + "authorids": "/c/chen-jia/; /y/yue-zhang/", + "bibtex": "@inproceedings{jia-zhang-2022-prompt,\n title = \"Prompt-based Distribution Alignment for Domain Generalization in Text Classification\",\n author = \"Jia, Chen and\n Zhang, Yue\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.690/\",\n doi = \"10.18653/v1/2022.emnlp-main.690\",\n pages = \"10147--10157\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.690.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.690/", + "pdf_size": 1551611, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2960199433467640772&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 0, + "aff": "Fudan University, China+School of Engineering, Westlake University, China+Institute of Advanced Technology, Westlake Institute for Advanced Study, China; Fudan University, China+School of Engineering, Westlake University, China+Institute of Advanced Technology, Westlake Institute for Advanced Study, China", + "aff_domain": "westlake.edu.cn;westlake.edu.cn", + "email": "westlake.edu.cn;westlake.edu.cn", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Fudan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.fudan.edu.cn", + "aff_unique_abbr": "Fudan", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + 
"id": "2022.findings-emnlp.512", + "title": "Prompt-learning for Fine-grained Entity Typing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "As an effective approach to adapting pre-trained language models (PLMs) for specific tasks, prompt-learning has recently attracted much attention from researchers. By using cloze-style language prompts to stimulate the versatile knowledge of PLMs, prompt-learning can achieve promising results on a series of NLP tasks, such as natural language inference, sentiment classification, and knowledge probing. In this work, we investigate the application of prompt-learning on fine-grained entity typing in fully supervised, few-shot, and zero-shot scenarios. We first develop a simple and effective prompt-learning pipeline by constructing entity-oriented verbalizers and templates and conducting masked language modeling. Further, to tackle the zero-shot regime, we propose a self-supervised strategy that carries out distribution-level optimization in prompt-learning to automatically summarize the information of entity types. 
Extensive experiments on four fine-grained entity typing benchmarks under fully supervised, few-shot, and zero-shot settings show the effectiveness of the prompt-learning paradigm and further make a powerful alternative to vanilla fine-tuning.", + "author": "Ning Ding; Yulin Chen; Xu Han; Guangwei Xu; Xiaobin Wang; Pengjun Xie; Haitao Zheng; Zhiyuan Liu; Juanzi Li; Hong-Gee Kim", + "authorids": "/n/ning-ding/; /y/yulin-chen/; /x/xu-han/; /g/guangwei-xu/; /x/xiaobin-wang/; /p/pengjun-xie/; /h/haitao-zheng/; /z/zhiyuan-liu/; /j/juanzi-li/; /h/hong-gee-kim/", + "bibtex": "@inproceedings{ding-etal-2022-prompt,\n title = \"Prompt-learning for Fine-grained Entity Typing\",\n author = \"Ding, Ning and\n Chen, Yulin and\n Han, Xu and\n Xu, Guangwei and\n Wang, Xiaobin and\n Xie, Pengjun and\n Zheng, Haitao and\n Liu, Zhiyuan and\n Li, Juanzi and\n Kim, Hong-Gee\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.512/\",\n doi = \"10.18653/v1/2022.findings-emnlp.512\",\n pages = \"6888--6901\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.512.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.512/", + "pdf_size": 1446666, + "gs_citation": 172, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7458921155269670724&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China; SIGS, Tsinghua University; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; SIGS, Tsinghua University; Dept. of Comp. Sci. & Tech., Institute for AI, Tsinghua University, Beijing, China; Dept. of Comp. Sci. 
& Tech., Institute for AI, Tsinghua University, Beijing, China; Seoul National University", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ; ", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;1;1;1;1;0;0;0;2", + "aff_unique_norm": "Tsinghua University;Alibaba Group;Seoul National University", + "aff_unique_dep": "Dept. of Comp. Sci. & Tech.;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.alibaba.com;https://www.snu.ac.kr", + "aff_unique_abbr": "THU;Alibaba;SNU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;1", + "aff_country_unique": "China;South Korea" + }, + { + "id": "2022.emnlp-main.603", + "title": "PromptBERT: Improving BERT Sentence Embeddings with Prompts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose PromptBERT, a novel contrastive learning method for learning better sentence representation. We firstly analysis the drawback of current sentence embedding from original BERT and find that it is mainly due to the static token embedding bias and ineffective BERT layers. Then we propose the first prompt-based sentence embeddings method and discuss two prompt representing methods and three prompt searching methods to make BERT achieve better sentence embeddings .Moreover, we propose a novel unsupervised training objective by the technology of template denoising, which substantially shortens the performance gap between the supervised and unsupervised settings. Extensive experiments show the effectiveness of our method. 
Compared to SimCSE, PromptBert achieves 2.29 and 2.58 points of improvement based on BERT and RoBERTa in the unsupervised setting.", + "author": "Ting Jiang; Jian Jiao; Shaohan Huang; Zihan Zhang; Deqing Wang; Fuzhen Zhuang; Furu Wei; Haizhen Huang; Denvy Deng; Qi Zhang", + "authorids": "/t/ting-jiang/; /j/jian-jiao/; /s/shaohan-huang/; /z/zihan-zhang/; /d/deqing-wang/; /f/fuzhen-zhuang/; /f/furu-wei/; /h/haizhen-huang/; /d/denvy-deng/; /q/qi-zhang/", + "bibtex": "@inproceedings{jiang-etal-2022-promptbert,\n title = \"{P}rompt{BERT}: Improving {BERT} Sentence Embeddings with Prompts\",\n author = \"Jiang, Ting and\n Jiao, Jian and\n Huang, Shaohan and\n Zhang, Zihan and\n Wang, Deqing and\n Zhuang, Fuzhen and\n Wei, Furu and\n Huang, Haizhen and\n Deng, Denvy and\n Zhang, Qi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.603/\",\n doi = \"10.18653/v1/2022.emnlp-main.603\",\n pages = \"8826--8837\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.603.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.603/", + "pdf_size": 4068817, + "gs_citation": 205, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2175965949696178589&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "https://github.com/kongds/Prompt-BERT", + "project": "", + "author_num": 10 + }, + { + "id": "2022.emnlp-main.185", + "title": "PromptEHR: Conditional Electronic Healthcare Records Generation with Prompt Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Accessing longitudinal multimodal Electronic Healthcare Records (EHRs) is 
challenging due to privacy concerns, which hinders the use of ML for healthcare applications. Synthetic EHRs generation bypasses the need to share sensitive real patient records. However, existing methods generate single-modal EHRs by unconditional generation or by longitudinal inference, which falls short of low flexibility and makes unrealistic EHRs. In this work, we propose to formulate EHRs generation as a text-to-text translation task by language models (LMs), which suffices to highly flexible event imputation during generation. We also design prompt learning to control the generation conditioned by numerical and categorical demographic features. We evaluate synthetic EHRs quality by two perplexity measures accounting for their longitudinal pattern (longitudinal imputation perplexity, lpl) and the connections cross modalities (cross-modality imputation perplexity, mpl). Moreover, we utilize two adversaries: membership and attribute inference attacks for privacy-preserving evaluation. Experiments on MIMIC-III data demonstrate the superiority of our methods on realistic EHRs generation (53.1% decrease of lpl and 45.3% decrease of mpl on average compared to the best baselines) with low privacy risks. 
Software is available at https://github.com/RyanWangZf/PromptEHR.", + "author": "Zifeng Wang; Jimeng Sun", + "authorids": "/z/zifeng-wang/; /j/jimeng-sun/", + "bibtex": "@inproceedings{wang-sun-2022-promptehr,\n title = \"{P}rompt{EHR}: Conditional Electronic Healthcare Records Generation with Prompt Learning\",\n author = \"Wang, Zifeng and\n Sun, Jimeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.185/\",\n doi = \"10.18653/v1/2022.emnlp-main.185\",\n pages = \"2873--2885\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.185.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.185/", + "pdf_size": 698206, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9443817317135310955&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff": "Department of Computer Science, University of Illinois Urbana-Champaign + Carle Illinois College of Medicine, University of Illinois Urbana-Champaign; Department of Computer Science, University of Illinois Urbana-Champaign + Carle Illinois College of Medicine, University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu", + "github": "https://github.com/RyanWangZf/PromptEHR", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "University of Illinois Urbana-Champaign", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "United States" 
+ }, + { + "id": "2022.emnlp-main.780", + "title": "Prompting ELECTRA: Few-Shot Learning with Discriminative Pre-Trained Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained masked language models successfully perform few-shot learning by formulating downstream tasks as text infilling. How- ever, as a strong alternative in full-shot settings, discriminative pre-trained models like ELECTRA do not fit into the paradigm. In this work, we adapt prompt-based few-shot learning to ELECTRA and show that it outperforms masked language models in a wide range of tasks. ELECTRA is pre-trained to distinguish if a token is generated or original. We naturally extend that to prompt-based few-shot learning by training to score the originality of the target options without introducing new parameters. Our method can be easily adapted to tasks involving multi-token predictions without extra computation overhead. Analysis shows that ELECTRA learns distributions that align better with downstream tasks.", + "author": "Mengzhou Xia; Mikel Artetxe; Jingfei Du; Danqi Chen; Veselin Stoyanov", + "authorids": "/m/mengzhou-xia/; /m/mikel-artetxe/; /j/jingfei-du/; /d/danqi-chen/; /v/veselin-stoyanov/", + "bibtex": "@inproceedings{xia-etal-2022-prompting,\n title = \"Prompting {ELECTRA}: Few-Shot Learning with Discriminative Pre-Trained Models\",\n author = \"Xia, Mengzhou and\n Artetxe, Mikel and\n Du, Jingfei and\n Chen, Danqi and\n Stoyanov, Veselin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.780/\",\n doi = \"10.18653/v1/2022.emnlp-main.780\",\n pages = \"11351--11361\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.780.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.780/", + "pdf_size": 1292511, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12709086721055638162&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Princeton University; Meta AI; Meta AI; Princeton University; Meta AI", + "aff_domain": "cs.princeton.edu;meta.com;meta.com;cs.princeton.edu;meta.com", + "email": "cs.princeton.edu;meta.com;meta.com;cs.princeton.edu;meta.com", + "github": "https://github.com/facebookresearch/ELECTRA-Fewshot-Learning", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0;1", + "aff_unique_norm": "Princeton University;Meta Platforms, Inc.", + "aff_unique_dep": ";Meta AI", + "aff_unique_url": "https://www.princeton.edu;https://meta.com", + "aff_unique_abbr": "Princeton;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.22", + "title": "Prompting for Multimodal Hateful Meme Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Hateful meme classification is a challenging multimodal task that requires complex reasoning and contextual background knowledge. Ideally, we could leverage an explicit external knowledge base to supplement contextual and cultural information in hateful memes. However, there is no known explicit external knowledge base that could provide such hate speech contextual information. To address this gap, we propose PromptHate, a simple yet effective prompt-based model that prompts pre-trained language models (PLMs) for hateful meme classification. Specifically, we construct simple prompts and provide a few in-context examples to exploit the implicit knowledge in the pre-trained RoBERTa language model for hateful meme classification. 
We conduct extensive experiments on two publicly available hateful and offensive meme datasets. Our experiment results show that PromptHate is able to achieve a high AUC of 90.96, outperforming state-of-the-art baselines on the hateful meme classification task. We also perform fine-grain analyses and case studies on various prompt settings and demonstrate the effectiveness of the prompts on hateful meme classification.", + "author": "Rui Cao; Roy Ka-Wei Lee; Wen-Haw Chong; Jing Jiang", + "authorids": "/r/rui-cao/; /r/roy-ka-wei-lee/; /w/wen-haw-chong/; /j/jing-jiang/", + "bibtex": "@inproceedings{cao-etal-2022-prompting,\n title = \"Prompting for Multimodal Hateful Meme Classification\",\n author = \"Cao, Rui and\n Lee, Roy Ka-Wei and\n Chong, Wen-Haw and\n Jiang, Jing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.22/\",\n doi = \"10.18653/v1/2022.emnlp-main.22\",\n pages = \"321--332\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.22.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.22/", + "pdf_size": 1929204, + "gs_citation": 86, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12559249032659892373&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Singapore Management University; Singapore University of Technology and Design; Singapore Management University; Singapore Management University", + "aff_domain": "phdcs.smu.edu.sg;phdis.smu.edu.sg;smu.edu.sg;sutd.edu.sg", + "email": "phdcs.smu.edu.sg;phdis.smu.edu.sg;smu.edu.sg;sutd.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "Singapore Management University;Singapore 
University of Technology and Design", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.smu.edu.sg;https://www.sutd.edu.sg", + "aff_unique_abbr": "SMU;SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.emnlp-main.747", + "title": "ProofInfer: Generating Proof via Iterative Hierarchical Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Proof generation focuses on deductive reasoning: given a hypothesis and a set of theories, including some supporting facts and logical rules expressed in natural language, the model generates a proof tree indicating how to deduce the hypothesis from given theories.Current models with state-of-the-art performance employ the stepwise method that adds an individual node to the proof step-by-step.However, these methods actually focus on generating several proof paths rather than a whole tree.During generation, they focus on the most relevant areas of the currently generated node while neglecting the rest of the proof tree. To address this problem, we propose ProofInfer, which generates the proof tree via iterative hierarchical inference.At each step, ProofInfer adds the entire layer to the proof, where all nodes in this layer are generated simultaneously. 
Since the conventional autoregressive generation architecture cannot simultaneously predict multiple nodes, ProofInfer employs text-to-text paradigm.To this end, we propose a divide-and-conquer algorithm to encode the proof tree as the plain text without losing structure information.Experimental results show that ProofInfer significantly improves performance on several widely-used datasets.In addition, ProofInfer still performs well with data-limited, achieving comparable performance to the state-of-the-art model with about 40% of the training data.", + "author": "Zichu Fei; Qi Zhang; Xin Zhou; Tao Gui; Xuanjing Huang", + "authorids": "/z/zichu-fei/; /q/qi-zhang/; /x/xin-zhou/; /t/tao-gui/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{fei-etal-2022-proofinfer,\n title = \"{P}roof{I}nfer: Generating Proof via Iterative Hierarchical Inference\",\n author = \"Fei, Zichu and\n Zhang, Qi and\n Zhou, Xin and\n Gui, Tao and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.747/\",\n doi = \"10.18653/v1/2022.emnlp-main.747\",\n pages = \"10883--10892\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.747.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.747/", + "pdf_size": 862356, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6384997506761368677&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "https://github.com/sion-zcfei/ProofInfer", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.267", + "title": "ProsocialDialog: A Prosocial Backbone for Conversational Agents", + "track": 
"main", + "status": "Main", + "award": false, + "abstract": "Most existing dialogue systems fail to respond properly to potentially unsafe user utterances by either ignoring or passively agreeing with them. To address this issue, we introduce ProsocialDialog, the first large-scale multi-turn dialogue dataset to teach conversational agents to respond to problematic content following social norms. Covering diverse unethical, problematic, biased, and toxic situations, ProsocialDialog contains responses that encourage prosocial behavior, grounded in commonsense social rules (i.e., rules-of-thumb, RoTs). Created via a human-AI collaborative framework, ProsocialDialog consists of 58K dialogues, with 331K utterances, 160K unique RoTs, and 497K dialogue safety labels accompanied by free-form rationales.With this dataset, we introduce a dialogue safety detection module, Canary, capable of generating RoTs given conversational context, and a socially-informed dialogue agent, Prost. Empirical results show that Prost generates more socially acceptable dialogues compared to other state-of-the-art language and dialogue models in both in-domain and out-of-domain settings. Additionally, Canary effectively guides conversational agents and off-the-shelf language models to generate significantly more prosocial responses. 
Our work highlights the promise and importance of creating and steering conversational AI to be socially responsible.", + "author": "Hyunwoo Kim; Youngjae Yu; Liwei Jiang; Ximing Lu; Daniel Khashabi; Gunhee Kim; Yejin Choi; Maarten Sap", + "authorids": "/h/hyunwoo-kim/; /y/youngjae-yu/; /l/liwei-jiang/; /x/ximing-lu/; /d/daniel-khashabi/; /g/gunhee-kim/; /y/yejin-choi/; /m/maarten-sap/", + "bibtex": "@inproceedings{kim-etal-2022-prosocialdialog,\n title = \"{P}rosocial{D}ialog: A Prosocial Backbone for Conversational Agents\",\n author = \"Kim, Hyunwoo and\n Yu, Youngjae and\n Jiang, Liwei and\n Lu, Ximing and\n Khashabi, Daniel and\n Kim, Gunhee and\n Choi, Yejin and\n Sap, Maarten\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.267/\",\n doi = \"10.18653/v1/2022.emnlp-main.267\",\n pages = \"4005--4029\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.267.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.267/", + "pdf_size": 3694801, + "gs_citation": 114, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17605114889409794755&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Allen Institute for Artificial Intelligence+Department of Computer Science and Engineering, Seoul National University+Paul G. Allen School of Computer Science, University of Washington; Allen Institute for Artificial Intelligence+Paul G. Allen School of Computer Science, University of Washington; Allen Institute for Artificial Intelligence+Paul G. Allen School of Computer Science, University of Washington; Allen Institute for Artificial Intelligence+Paul G. 
Allen School of Computer Science, University of Washington; Johns Hopkins University+Language Technologies Institute, Carnegie Mellon University; Department of Computer Science and Engineering, Seoul National University; Allen Institute for Artificial Intelligence+Paul G. Allen School of Computer Science, University of Washington; Allen Institute for Artificial Intelligence+Johns Hopkins University+Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "vl.snu.ac.kr; ; ; ; ; ; ; ", + "email": "vl.snu.ac.kr; ; ; ; ; ; ; ", + "github": "", + "project": "https://hyunw.kim/prosocial-dialog", + "author_num": 8, + "aff_unique_index": "0+1+2;0+2;0+2;0+2;3+4;1;0+2;0+3+4", + "aff_unique_norm": "Allen Institute for Artificial Intelligence;Seoul National University;University of Washington;Johns Hopkins University;Carnegie Mellon University", + "aff_unique_dep": ";Department of Computer Science and Engineering;Paul G. Allen School of Computer Science;;Language Technologies Institute", + "aff_unique_url": "https://allenai.org;https://www.snu.ac.kr;https://www.cs.washington.edu;https://www.jhu.edu;https://www.cmu.edu", + "aff_unique_abbr": "AI2;SNU;UW;JHU;CMU", + "aff_campus_unique_index": "1+2;2;2;2;3;1;2;3", + "aff_campus_unique": ";Seoul;Seattle;Pittsburgh", + "aff_country_unique_index": "0+1+0;0+0;0+0;0+0;0+0;1;0+0;0+0+0", + "aff_country_unique": "United States;South Korea" + }, + { + "id": "2022.emnlp-industry.47", + "title": "Prototype-Representations for Training Data Filtering in Weakly-Supervised Information Extraction", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "The availability of high quality training data is still a bottleneck for the practical utilization of information extraction models, despite the breakthroughs in zero and few-shot learning techniques. 
This is further exacerbated for industry applications, where new tasks, domains, and specific use cases keep arising, which makes it impractical to depend on manually annotated data. Therefore, weak and distant supervision emerged as popular approaches to bootstrap training, utilizing labeling functions to guide the annotation process. Weakly-supervised annotation of training data is fast and efficient, however, it results in many irrelevant and out-of-context matches. This is a challenging problem that can degrade the performance in downstream models, or require a manual data cleaning step that can incur significant overhead. In this paper we present a prototype-based filtering approach, that can be utilized to denoise weakly supervised training data. The system is very simple, unsupervised, scalable, and requires little manual intervention, yet results in significant precision gains. We apply the technique in the task of attribute value extraction in e-commerce websites, and achieve up to 9% gain in precision for the downstream models, with a minimal drop in recall.", + "author": "Nasser Zalmout; Xian Li", + "authorids": "/n/nasser-zalmout/; /x/xian-li/", + "bibtex": "@inproceedings{zalmout-li-2022-prototype,\n title = \"Prototype-Representations for Training Data Filtering in Weakly-Supervised Information Extraction\",\n author = \"Zalmout, Nasser and\n Li, Xian\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.47/\",\n doi = \"10.18653/v1/2022.emnlp-industry.47\",\n pages = \"467--474\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.47.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.47/", + "pdf_size": 581359, + "gs_citation": 7, 
+ "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7117369649775549099&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Amazon.com; Amazon.com", + "aff_domain": "amazon.com;amazon.com", + "email": "amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.800", + "title": "Pseudo-Relevance for Enhancing Document Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper studies how to enhance the document representation for the bi-encoder approach in dense document retrieval. The bi-encoder, separately encoding a query and a document as a single vector, is favored for high efficiency in large-scale information retrieval, compared to more effective but complex architectures. To combine the strength of the two, the multi-vector representation of documents for bi-encoder, such as ColBERT preserving all token embeddings, has been widely adopted. Our contribution is to reduce the size of the multi-vector representation, without compromising the effectiveness, supervised by query logs. 
Our proposed solution decreases the latency and the memory footprint, up to 8- and 3-fold, validated on MSMARCO and real-world search query logs.", + "author": "Jihyuk Kim; Seung-won Hwang; Seoho Song; Hyeseon Ko; Young-In Song", + "authorids": "/j/jihyuk-kim/; /s/seung-won-hwang/; /s/seoho-song/; /h/hyeseon-ko/; /y/young-in-song/", + "bibtex": "@inproceedings{kim-etal-2022-pseudo,\n title = \"Pseudo-Relevance for Enhancing Document Representation\",\n author = \"Kim, Jihyuk and\n Hwang, Seung-won and\n Song, Seoho and\n Ko, Hyeseon and\n Song, Young-In\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.800/\",\n doi = \"10.18653/v1/2022.emnlp-main.800\",\n pages = \"11639--11652\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.800.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.800/", + "pdf_size": 518225, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1643960744276983679&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Yonsei University; Seoul National University; NAVER Corp; NAVER Corp; NAVER Corp", + "aff_domain": "yonsei.ac.kr;snu.ac.kr;navercorp.com;navercorp.com;navercorp.com", + "email": "yonsei.ac.kr;snu.ac.kr;navercorp.com;navercorp.com;navercorp.com", + "github": "https://github.com/jihyukkim-nlp/PQA-ColBERT", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;2;2", + "aff_unique_norm": "Yonsei University;Seoul National University;NAVER Corporation", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.yonsei.ac.kr;https://www.snu.ac.kr;https://www.naver.com", + "aff_unique_abbr": "Yonsei;SNU;NAVER", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.246", + "title": "PseudoReasoner: Leveraging Pseudo Labels for Commonsense Knowledge Base Population", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Commonsense Knowledge Base (CSKB) Population aims at reasoning over unseen entities and assertions on CSKBs, and is an important yet hard commonsense reasoning task. One challenge is that it requires out-of-domain generalization ability as the source CSKB for training is of a relatively smaller scale (1M) while the whole candidate space for population is way larger (200M). We propose PseudoReasoner, a semi-supervised learning framework for CSKB population that uses a teacher model pre-trained on CSKBs to provide pseudo labels on the unlabeled candidate dataset for a student model to learn from. The teacher can be a generative model rather than restricted to discriminative models as previous works.In addition, we design a new filtering procedure for pseudo labels based on influence function and the student model\u2019s prediction to further improve the performance. The framework can improve the backbone model KG-BERT (RoBERTa-large) by 3.3 points on the overall performance and especially, 5.3 points on the out-of-domain performance, and achieves the state-of-the-art. The codes will be made public on acceptance. Codes and data are available at https://github.com/HKUST-KnowComp/PseudoReasoner.", + "author": "Tianqing Fang; Quyet V. Do; Hongming Zhang; Yangqiu Song; Ginny Y. Wong; Simon See", + "authorids": "/t/tianqing-fang/; /q/quyet-v-do/; /h/hongming-zhang/; /y/yangqiu-song/; /g/ginny-y-wong/; /s/simon-see/", + "bibtex": "@inproceedings{fang-etal-2022-pseudoreasoner,\n title = \"{P}seudo{R}easoner: Leveraging Pseudo Labels for Commonsense Knowledge Base Population\",\n author = \"Fang, Tianqing and\n Do, Quyet V. 
and\n Zhang, Hongming and\n Song, Yangqiu and\n Wong, Ginny Y. and\n See, Simon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.246/\",\n doi = \"10.18653/v1/2022.findings-emnlp.246\",\n pages = \"3379--3394\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.246.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.246/", + "pdf_size": 664491, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8251871738677073947&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Engineering, HKUST, Hong Kong SAR; Department of Computer Science and Engineering, HKUST, Hong Kong SAR; Tencent AI Lab, Bellevue, USA; Department of Computer Science and Engineering, HKUST, Hong Kong SAR; NVIDIA AI Technology Center (NV AITC), NVIDIA, Santa Clara, USA; NVIDIA AI Technology Center (NV AITC), NVIDIA, Santa Clara, USA", + "aff_domain": "cse.ust.hk;cse.ust.hk;global.tencent.com;cse.ust.hk;nvidia.com;nvidia.com", + "email": "cse.ust.hk;cse.ust.hk;global.tencent.com;cse.ust.hk;nvidia.com;nvidia.com", + "github": "https://github.com/HKUST-KnowComp/PseudoReasoner", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;2", + "aff_unique_norm": "Hong Kong University of Science and Technology;Tencent;NVIDIA", + "aff_unique_dep": "Department of Computer Science and Engineering;AI Lab;NVIDIA AI Technology Center", + "aff_unique_url": "https://www.hkust.edu.hk;https://ai.tencent.com;https://www.nvidia.com", + "aff_unique_abbr": "HKUST;Tencent AI Lab;NV", + "aff_campus_unique_index": "0;0;1;0;2;2", + "aff_campus_unique": "Hong Kong SAR;Bellevue;Santa Clara", + 
"aff_country_unique_index": "0;0;1;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.489", + "title": "Q-TOD: A Query-driven Task-oriented Dialogue System", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing pipelined task-oriented dialogue systems usually have difficulties adapting to unseen domains, whereas end-to-end systems are plagued by large-scale knowledge bases in practice. In this paper, we introduce a novel query-driven task-oriented dialogue system, namely Q-TOD. The essential information from the dialogue context is extracted into a query, which is further employed to retrieve relevant knowledge records for response generation. Firstly, as the query is in the form of natural language and not confined to the schema of the knowledge base, the issue of domain adaption is alleviated remarkably in Q-TOD. Secondly, as the query enables the decoupling of knowledge retrieval from the generation, Q-TOD gets rid of the issue of knowledge base scalability. To evaluate the effectiveness of the proposed Q-TOD, we collect query annotations for three publicly available task-oriented dialogue datasets. 
Comprehensive experiments verify that Q-TOD outperforms strong baselines and establishes a new state-of-the-art performance on these datasets.", + "author": "Xin Tian; Yingzhan Lin; Mengfei Song; Siqi Bao; Fan Wang; Huang He; Shuqi Sun; Hua Wu", + "authorids": "/x/xin-tian/; /y/yingzhan-lin/; /m/mengfei-song/; /s/siqi-bao/; /f/fan-wang/; /h/huang-he/; /s/shuqi-sun/; /h/hua-wu/", + "bibtex": "@inproceedings{tian-etal-2022-q,\n title = \"{Q}-{TOD}: A Query-driven Task-oriented Dialogue System\",\n author = \"Tian, Xin and\n Lin, Yingzhan and\n Song, Mengfei and\n Bao, Siqi and\n Wang, Fan and\n He, Huang and\n Sun, Shuqi and\n Wu, Hua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.489/\",\n doi = \"10.18653/v1/2022.emnlp-main.489\",\n pages = \"7260--7271\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.489.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.489/", + "pdf_size": 376779, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13694534790322284668&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China; Baidu Inc., China", + "aff_domain": "baidu.com;baidu.com;baidu.com; ; ; ; ; ", + "email": "baidu.com;baidu.com;baidu.com; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Baidu Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.baidu.com", + "aff_unique_abbr": "Baidu", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.147", + "title": "QA Domain Adaptation using Hidden Space Augmentation and Self-Supervised Contrastive Adaptation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Question answering (QA) has recently shown impressive results for answering questions from customized domains. Yet, a common challenge is to adapt QA models to an unseen target domain. In this paper, we propose a novel self-supervised framework called QADA for QA domain adaptation. QADA introduces a novel data augmentation pipeline used to augment training QA samples. Different from existing methods, we enrich the samples via hidden space augmentation. For questions, we introduce multi-hop synonyms and sample augmented token embeddings with Dirichlet distributions. For contexts, we develop an augmentation method which learns to drop context spans via a custom attentive sampling strategy. Additionally, contrastive learning is integrated in the proposed self-supervised adaptation framework QADA. Unlike existing approaches, we generate pseudo labels and propose to train the model via a novel attention-based contrastive adaptation method. The attention weights are used to build informative features for discrepancy estimation that helps the QA model separate answers and generalize across source and target domains. To the best of our knowledge, our work is the first to leverage hidden space augmentation and attention-based contrastive adaptation for self-supervised domain adaptation in QA. 
Our evaluation shows that QADA achieves considerable improvements on multiple target datasets over state-of-the-art baselines in QA domain adaptation.", + "author": "Zhenrui Yue; Huimin Zeng; Bernhard Kratzwald; Stefan Feuerriegel; Dong Wang", + "authorids": "/z/zhenrui-yue/; /h/huimin-zeng/; /b/bernhard-kratzwald/; /s/stefan-feuerriegel/; /d/dong-wang/", + "bibtex": "@inproceedings{yue-etal-2022-qa,\n title = \"{QA} Domain Adaptation using Hidden Space Augmentation and Self-Supervised Contrastive Adaptation\",\n author = \"Yue, Zhenrui and\n Zeng, Huimin and\n Kratzwald, Bernhard and\n Feuerriegel, Stefan and\n Wang, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.147/\",\n doi = \"10.18653/v1/2022.emnlp-main.147\",\n pages = \"2308--2321\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.147.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.147/", + "pdf_size": 555231, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6036340550341351482&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "UIUC; UIUC; EthonAI; LMU Munich; UIUC", + "aff_domain": "illinois.edu;illinois.edu;ethon.ai;lmu.de;illinois.edu", + "email": "illinois.edu;illinois.edu;ethon.ai;lmu.de;illinois.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;EthonAI;Ludwig Maximilian University of Munich", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www illinois.edu;https://ethonai.com;https://www.lmu.de", + "aff_unique_abbr": "UIUC;EthonAI;LMU", + "aff_campus_unique_index": "0;0;2;0", + 
"aff_campus_unique": "Urbana-Champaign;;Munich", + "aff_country_unique_index": "0;0;0;1;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "2022.emnlp-main.528", + "title": "QASem Parsing: Text-to-text Modeling of QA-based Semantics", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Various works suggest the appeals of incorporating explicit semantic representations when addressing challenging realistic NLP scenarios. Common approaches offer either comprehensive linguistically-based formalisms, like AMR, or alternatively Open-IE, which provides a shallow and partial representation. More recently, an appealing trend introduces semi-structured natural-language structures as an intermediate meaning-capturing representation, often in the form of questions and answers.In this work, we further promote this line of research by considering three prior QA-based semantic representations. These cover verbal, nominalized and discourse-based predications, regarded as jointly providing a comprehensive representation of textual information \u2014 termed QASem. To facilitate this perspective, we investigate how to best utilize pre-trained sequence-to-sequence language models, which seem particularly promising for generating representations that consist of natural language expressions (questions and answers). In particular, we examine and analyze input and output linearization strategies, as well as data augmentation and multitask learning for a scarce training data setup. 
Consequently, we release the first unified QASem parsing tool, easily applicable for downstream tasks that can benefit from an explicit semi-structured account of information units in text.", + "author": "Ayal Klein; Eran Hirsch; Ron Eliav; Valentina Pyatkin; Avi Caciularu; Ido Dagan", + "authorids": "/a/ayal-klein/; /e/eran-hirsch/; /r/ron-eliav/; /v/valentina-pyatkin/; /a/avi-caciularu/; /i/ido-dagan/", + "bibtex": "@inproceedings{klein-etal-2022-qasem,\n title = \"{QAS}em Parsing: Text-to-text Modeling of {QA}-based Semantics\",\n author = \"Klein, Ayal and\n Hirsch, Eran and\n Eliav, Ron and\n Pyatkin, Valentina and\n Caciularu, Avi and\n Dagan, Ido\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.528/\",\n doi = \"10.18653/v1/2022.emnlp-main.528\",\n pages = \"7742--7756\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.528.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.528/", + "pdf_size": 259799, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9800018539796455773&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Computer Science Department, Bar-Ilan University; Computer Science Department, Bar-Ilan University; Computer Science Department, Bar-Ilan University; Computer Science Department, Bar-Ilan University; Computer Science Department, Bar-Ilan University; Computer Science Department, Bar-Ilan University", + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "email": "gmail.com;gmail.com;gmail.com;gmail.com;gmail.com;cs.biu.ac.il", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + 
"aff_unique_norm": "Bar-Ilan University", + "aff_unique_dep": "Computer Science Department", + "aff_unique_url": "https://www.biu.ac.il", + "aff_unique_abbr": "BIU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.emnlp-main.37", + "title": "QRelScore: Better Evaluating Generated Questions with Deeper Understanding of Context-aware Relevance", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing metrics for assessing question generation not only require costly human reference but also fail to take into account the input context of generation, rendering the lack of deep understanding of the relevance between the generated questions and input contexts. As a result, they may wrongly penalize a legitimate and reasonable candidate question when it (1) involves complicated reasoning with the context or (2) can be grounded by multiple evidences in the context.In this paper, we propose QRelScore, a context-aware Relevance evaluation metric for Question Generation.Based on off-the-shelf language models such as BERT and GPT2, QRelScore employs both word-level hierarchical matching and sentence-level prompt-based generation to cope with the complicated reasoning and diverse generation from multiple evidences, respectively.Compared with existing metrics, our experiments demonstrate that QRelScore is able to achieve a higher correlation with human judgments while being much more robust to adversarial samples.", + "author": "Xiaoqiang Wang; Bang Liu; Siliang Tang; Lingfei Wu", + "authorids": "/x/xiaoqiang-wang/; /b/bang-liu/; /s/siliang-tang/; /l/lingfei-wu/", + "bibtex": "@inproceedings{wang-etal-2022-qrelscore,\n title = \"{QR}el{S}core: Better Evaluating Generated Questions with Deeper Understanding of Context-aware Relevance\",\n author = \"Wang, Xiaoqiang and\n Liu, Bang and\n Tang, Siliang and\n Wu, Lingfei\",\n editor = \"Goldberg, 
Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.37/\",\n doi = \"10.18653/v1/2022.emnlp-main.37\",\n pages = \"562--581\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.37.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.37/", + "pdf_size": 878696, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4128846993169807782&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Zhejiang University; Universit\u00e9 de Montr\u00e9al & Mila; Zhejiang University; Pinterest", + "aff_domain": "zju.edu.cn;umontreal.ca;zju.edu.cn;email.wm.edu", + "email": "zju.edu.cn;umontreal.ca;zju.edu.cn;email.wm.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Zhejiang University;Universit\u00e9 de Montr\u00e9al;Pinterest", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.umontreal.ca;https://www.pinterest.com", + "aff_unique_abbr": "ZJU;UdeM;Pinterest", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;2", + "aff_country_unique": "China;Canada;United States" + }, + { + "id": "2022.emnlp-industry.50", + "title": "QUILL: Query Intent with Large Language Models using Retrieval Augmentation and Multi-stage Distillation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Large Language Models (LLMs) have shown impressive results on a variety of text understanding tasks. Search queries though pose a unique challenge, given their short-length and lack of nuance or context. 
Complicated feature engineering efforts do not always lead to downstream improvements as their performance benefits may be offset by increased complexity of knowledge distillation. Thus, in this paper we make the following contributions: (1) We demonstrate that Retrieval Augmentation of queries provides LLMs with valuable additional context enabling improved understanding. While Retrieval Augmentation typically increases latency of LMs (thus hurting distillation efficacy), (2) we provide a practical and effective way of distilling Retrieval Augmentation LLMs. Specifically, we use a novel two-stage distillation approach that allows us to carry over the gains of retrieval augmentation, without suffering the increased compute typically associated with it. (3) We demonstrate the benefits of the proposed approach (QUILL) on a billion-scale, real-world query understanding system resulting in huge gains. Via extensive experiments, including on public benchmarks, we believe this work offers a recipe for practical use of retrieval-augmented query understanding.", + "author": "Krishna Srinivasan; Karthik Raman; Anupam Samanta; Lingrui Liao; Luca Bertelli; Michael Bendersky", + "authorids": "/k/krishna-srinivasan/; /k/karthik-raman/; /a/anupam-samanta/; /l/lingrui-liao/; /l/luca-bertelli/; /m/michael-bendersky/", + "bibtex": "@inproceedings{srinivasan-etal-2022-quill,\n title = \"{QUILL}: Query Intent with Large Language Models using Retrieval Augmentation and Multi-stage Distillation\",\n author = \"Srinivasan, Krishna and\n Raman, Karthik and\n Samanta, Anupam and\n Liao, Lingrui and\n Bertelli, Luca and\n Bendersky, Michael\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-industry.50/\",\n doi = \"10.18653/v1/2022.emnlp-industry.50\",\n pages = \"492--501\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.50.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.50/", + "pdf_size": 287207, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2900783561148464360&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Google Research; Google Research; Google; Google; Google; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.229", + "title": "QaDialMoE: Question-answering Dialogue based Fact Verification with Mixture of Experts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Fact verification is an essential tool to mitigate the spread of false information online, which has gained a widespread attention recently. However, a fact verification in the question-answering dialogue is still underexplored. In this paper, we propose a neural network based approach called question-answering dialogue based fact verification with mixture of experts (QaDialMoE). It exploits questions and evidence effectively in the verification process and can significantly improve the performance of fact verification. Specifically, we exploit the mixture of experts to focus on various interactions among responses, questions and evidence. 
A manager with an attention guidance module is implemented to guide the training of experts and assign a reasonable attention score to each expert. A prompt module is developed to generate synthetic questions that make our approach more generalizable. Finally, we evaluate the QaDialMoE and conduct a comparative study on three benchmark datasets. The experimental results demonstrate that our QaDialMoE outperforms previous approaches by a large margin and achieves new state-of-the-art results on all benchmarks. This includes the accuracy improvements on the HEALTHVER as 84.26%, the FAVIQ A dev set as 78.7%, the FAVIQ R dev set as 86.1%, test set as 86.0%, and the COLLOQUIAL as 89.5%. To our best knowledge, this is the first work to investigate a question-answering dialogue based fact verification, and achieves new state-of-the-art results on various benchmark datasets.", + "author": "Longzheng Wang; Peng Zhang; Xiaoyu Lu; Lei Zhang; Chaoyang Yan; Chuang Zhang", + "authorids": "/l/longzheng-wang/; /p/peng-zhang/; /x/xiaoyu-lu/; /l/lei-zhang/; /c/chaoyang-yan/; /c/chuang-zhang/", + "bibtex": "@inproceedings{wang-etal-2022-qadialmoe,\n title = \"{Q}a{D}ial{M}o{E}: Question-answering Dialogue based Fact Verification with Mixture of Experts\",\n author = \"Wang, Longzheng and\n Zhang, Peng and\n Lu, Xiaoyu and\n Zhang, Lei and\n Yan, Chaoyang and\n Zhang, Chuang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.229/\",\n doi = \"10.18653/v1/2022.findings-emnlp.229\",\n pages = \"3146--3159\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.229.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.229/", + "pdf_size": 555071, + "gs_citation": 5, 
+ "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14211594078501344852&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; School of Cyber Security, University of Chinese Academy of Sciences+School of Cyber Security, Nanjing University of Science and Technology; School of Cyber Security, Nanjing University of Science and Technology; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences; Institute of Information Engineering, Chinese Academy of Sciences", + "aff_domain": "iie.ac.cn;iie.ac.cn;njust.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "email": "iie.ac.cn;iie.ac.cn;njust.edu.cn;iie.ac.cn;iie.ac.cn;iie.ac.cn", + "github": "https://github.com/wishever/QaDialMoE", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1+2;2;0+1;0+1;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Nanjing University of Science and Technology", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;School of Cyber Security", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;http://www.nust.edu.cn", + "aff_unique_abbr": "CAS;UCAS;", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.185", + "title": "Quadapter: Adapter for GPT-2 Quantization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer language models such as GPT-2 are difficult to quantize because of outliers in the activations leading to a large quantization error. 
To adapt to the error, one must use quantization-aware training, which entails a fine-tuning process based on the dataset and the training pipeline identical to those for the original model. Pretrained language models, however, often do not grant access to their datasets and training pipelines, forcing us to rely on arbitrary ones for fine-tuning. In that case, it is observed that quantization-aware training overfits the model to the fine-tuning data. To this end introduced is a quantization adapter (Quadapter), a small set of parameters that are learned to make activations quantization-friendly by scaling them channel-wise.For quantization without overfitting, we introduce a quantization adapter (Quadapter), a small set of parameters that are learned to make activations quantization-friendly by scaling them channel-wise. It keeps the model parameters unchanged. By applying our method to the challenging task of quantizing GPT-2, we demonstrate that it effectively prevents the overfitting and improves the quantization performance.", + "author": "Minseop Park; Jaeseong You; Markus Nagel; Simyung Chang", + "authorids": "/m/minseop-park/; /j/jaeseong-you/; /m/markus-nagel/; /s/simyung-chang/", + "bibtex": "@inproceedings{park-etal-2022-quadapter,\n title = \"Quadapter: Adapter for {GPT}-2 Quantization\",\n author = \"Park, Minseop and\n You, Jaeseong and\n Nagel, Markus and\n Chang, Simyung\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.185/\",\n doi = \"10.18653/v1/2022.findings-emnlp.185\",\n pages = \"2510--2517\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.185.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.185/", + 
"pdf_size": 834000, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17083802490810021480&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.732", + "title": "Quality Scoring of Source Words in Neural Translation Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Word-level quality scores on input source sentences can provide useful feedback to an end-user when translating into an unfamiliar target language. Recent approaches either require training special word-scoring models based on synthetic data or require repeated invocation of the translation model. We propose a simple approach based on comparing the difference of probabilities from two language models. The basic premise of our method is to reason how well each source word is explained by the target sentence as against the source language model. Our approach provides up to five points higher F1 scores and is significantly faster than the state of the art methods on three language pairs. Also, our method does not require training any new model. 
We release a public dataset on word omissions and mistranslations on a new language pair.", + "author": "Priyesh Jain; Sunita Sarawagi; Tushar Tomar", + "authorids": "/p/priyesh-jain/; /s/sunita-sarawagi/; /t/tushar-tomar/", + "bibtex": "@inproceedings{jain-etal-2022-quality,\n title = \"Quality Scoring of Source Words in Neural Translation Models\",\n author = \"Jain, Priyesh and\n Sarawagi, Sunita and\n Tomar, Tushar\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.732/\",\n doi = \"10.18653/v1/2022.emnlp-main.732\",\n pages = \"10683--10691\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.732.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.732/", + "pdf_size": 662202, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7666084773758877201&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Indian Institute of Technology Bombay, India; Indian Institute of Technology Bombay, India; Indian Institute of Technology Bombay, India", + "aff_domain": "gmail.com;iitb.ac.in;cse.iitb.ac.in", + "email": "gmail.com;iitb.ac.in;cse.iitb.ac.in", + "github": "https://github.com/jain-priyesh/target-lift.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Indian Institute of Technology Bombay", + "aff_unique_dep": "", + "aff_unique_url": "https://www.iitb.ac.in", + "aff_unique_abbr": "IIT Bombay", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Bombay", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.570", + "title": "Quantifying Privacy Risks of Masked Language Models Using 
Membership Inference Attacks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The wide adoption and application of Masked language models (MLMs) on sensitive data (from legal to medical) necessitates a thorough quantitative investigation into their privacy vulnerabilities. Prior attempts at measuring leakage of MLMs via membership inference attacks have been inconclusive, implying potential robustness of MLMs to privacy attacks.In this work, we posit that prior attempts were inconclusive because they based their attack solely on the MLM\u2019s model score. We devise a stronger membership inference attack based on likelihood ratio hypothesis testing that involves an additional reference MLM to more accurately quantify the privacy risks of memorization in MLMs. We show that masked language models are indeed susceptible to likelihood ratio membership inference attacks: Our empirical results, on models trained on medical notes, show that our attack improves the AUC of prior membership inference attacks from 0.66 to an alarmingly high 0.90 level.", + "author": "Fatemehsadat Mireshghallah; Kartik Goyal; Archit Uniyal; Taylor Berg-Kirkpatrick; Reza Shokri", + "authorids": "/f/fatemehsadat-mireshghallah/; /k/kartik-goyal/; /a/archit-uniyal/; /t/taylor-berg-kirkpatrick/; /r/reza-shokri/", + "bibtex": "@inproceedings{mireshghallah-etal-2022-quantifying,\n title = \"Quantifying Privacy Risks of Masked Language Models Using Membership Inference Attacks\",\n author = \"Mireshghallah, Fatemehsadat and\n Goyal, Kartik and\n Uniyal, Archit and\n Berg-Kirkpatrick, Taylor and\n Shokri, Reza\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.570/\",\n doi = \"10.18653/v1/2022.emnlp-main.570\",\n pages = \"8332--8347\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.570.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.570/", + "pdf_size": 1650479, + "gs_citation": 177, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12667038573749351992&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of California San Diego; Toyota Technological Institute at Chicago (TTIC); University of Virginia; University of California San Diego+National University of Singapore; National University of Singapore", + "aff_domain": "ucsd.edu;ttic.edu;virginia.edu;ucsd.edu;comp.nus.edu.sg", + "email": "ucsd.edu;ttic.edu;virginia.edu;ucsd.edu;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0+3;3", + "aff_unique_norm": "University of California, San Diego;Toyota Technological Institute at Chicago;University of Virginia;National University of Singapore", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://ucsd.edu;https://www.ttic.edu;https://www.virginia.edu;https://www.nus.edu.sg", + "aff_unique_abbr": "UCSD;TTIC;UVA;NUS", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "San Diego;Chicago;", + "aff_country_unique_index": "0;0;0;0+1;1", + "aff_country_unique": "United States;Singapore" + }, + { + "id": "2022.emnlp-main.523", + "title": "Query-based Instance Discrimination Network for Relational Triple Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Joint entity and relation extraction has been a core task in the field of information extraction. Recent approaches usually consider the extraction of relational triples from a stereoscopic perspective, either learning a relation-specific tagger or separate classifiers for each relation type. 
However, they still suffer from error propagation, relation redundancy and lack of high-level connections between triples. To address these issues, we propose a novel query-based approach to construct instance-level representations for relational triples. By metric-based comparison between query embeddings and token embeddings, we can extract all types of triples in one step, thus eliminating the error propagation problem. In addition, we learn the instance-level representation of relational triples via contrastive learning. In this way, relational triples can not only enclose rich class-level semantics but also access to high-order global connections. Experimental results show that our proposed method achieves the state of the art on five widely used benchmarks.", + "author": "Zeqi Tan; Yongliang Shen; Xuming Hu; Wenqi Zhang; Xiaoxia Cheng; Weiming Lu; Yueting Zhuang", + "authorids": "/z/zeqi-tan/; /y/yongliang-shen/; /x/xuming-hu/; /w/wenqi-zhang/; /x/xiaoxia-cheng/; /w/weiming-lu/; /y/yueting-zhuang/", + "bibtex": "@inproceedings{tan-etal-2022-query,\n title = \"Query-based Instance Discrimination Network for Relational Triple Extraction\",\n author = \"Tan, Zeqi and\n Shen, Yongliang and\n Hu, Xuming and\n Zhang, Wenqi and\n Cheng, Xiaoxia and\n Lu, Weiming and\n Zhuang, Yueting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.523/\",\n doi = \"10.18653/v1/2022.emnlp-main.523\",\n pages = \"7677--7690\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.523.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.523/", + "pdf_size": 1438102, + "gs_citation": 10, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7314652415073181526&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Zhejiang University; Zhejiang University; Tsinghua University; Zhejiang University; Zhejiang University; Zhejiang University; Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;mails.tsinghua.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;mails.tsinghua.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;0;0;0", + "aff_unique_norm": "Zhejiang University;Tsinghua University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.zju.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "ZJU;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.386", + "title": "Questioning the Validity of Summarization Datasets and Improving Their Factual Consistency", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The topic of summarization evaluation has recently attracted a surge of attention due to the rapid development of abstractive summarization systems. However, the formulation of the task is rather ambiguous, neither the linguistic nor the natural language processing communities have succeeded in giving a mutually agreed-upon definition. Due to this lack of well-defined formulation, a large number of popular abstractive summarization datasets are constructed in a manner that neither guarantees validity nor meets one of the most essential criteria of summarization: factual consistency. In this paper, we address this issue by combining state-of-the-art factual consistency models to identify the problematic instances present in popular summarization datasets. 
We release SummFC, a filtered summarization dataset with improved factual consistency, and demonstrate that models trained on this dataset achieve improved performance in nearly all quality aspects. We argue that our dataset should become a valid benchmark for developing and evaluating summarization systems.", + "author": "Yanzhu Guo; Chlo\u00e9 Clavel; Moussa Kamal Eddine; Michalis Vazirgiannis", + "authorids": "/y/yanzhu-guo/; /c/chloe-clavel/; /m/moussa-kamal-eddine/; /m/michalis-vazirgiannis/", + "bibtex": "@inproceedings{guo-etal-2022-questioning,\n title = \"Questioning the Validity of Summarization Datasets and Improving Their Factual Consistency\",\n author = \"Guo, Yanzhu and\n Clavel, Chlo{\\'e} and\n Kamal Eddine, Moussa and\n Vazirgiannis, Michalis\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.386/\",\n doi = \"10.18653/v1/2022.emnlp-main.386\",\n pages = \"5716--5727\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.386.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.386/", + "pdf_size": 787421, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=608369411089172084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "LIX, \u00c9cole Polytechnique, Institut Polytechnique de Paris, France; LTCI, T\u00e9l\u00e9com-Paris, Institut Polytechnique de Paris, France; LIX, \u00c9cole Polytechnique, Institut Polytechnique de Paris, France; LIX, \u00c9cole Polytechnique, Institut Polytechnique de Paris, France", + "aff_domain": "polytechnique.edu;telecom-paris.fr;polytechnique.edu;lix.polytechnique.fr", + "email": 
"polytechnique.edu;telecom-paris.fr;polytechnique.edu;lix.polytechnique.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "\u00c9cole Polytechnique;T\u00e9l\u00e9com-Paris", + "aff_unique_dep": "LIX;LTCI", + "aff_unique_url": "https://www.polytechnique.edu;https://www.telecom-paris.fr", + "aff_unique_abbr": "\u00c9cole Polytechnique;T\u00e9l\u00e9com-Paris", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.findings-emnlp.480", + "title": "R-AT: Regularized Adversarial Training for Natural Language Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Currently, adversarial training has become a popular and powerful regularization method in the natural language domain. In this paper, we propose Regularized Adversarial Training (R-AT) via dropout, which forces the output probability distributions of different sub-models generated by dropout to be consistent under the same adversarial samples. Specifically, we generate adversarial samples by perturbing the word embeddings. For each adversarial sample fed to the model, R-AT minimizes both the adversarial risk and the bidirectional KL-divergence between the adversarial output distributions of two sub-models sampled by dropout. Through extensive experiments on 13 public natural language understanding datasets, we found that R-AT has improvements for many models (e.g., rnn-based, cnn-based, and transformer-based models). For the GLUE benchmark, when R-AT is only applied to the fine-tuning stage, it is able to improve the overall test score of the BERT-base model from 78.3 to 79.6 and the RoBERTa-large model from 88.1 to 88.6. Theoretical analysis reveals that R-AT has potential gradient regularization during the training process. 
Furthermore, R-AT can reduce the inconsistency between training and testing of models with dropout.", + "author": "Shiwen Ni; Jiawen Li; Hung-Yu Kao", + "authorids": "/s/shiwen-ni/; /j/jiawen-li/; /h/hung-yu-kao/", + "bibtex": "@inproceedings{ni-etal-2022-r,\n title = \"{R}-{AT}: Regularized Adversarial Training for Natural Language Understanding\",\n author = \"Ni, Shiwen and\n Li, Jiawen and\n Kao, Hung-Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.480/\",\n doi = \"10.18653/v1/2022.findings-emnlp.480\",\n pages = \"6427--6440\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.480.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.480/", + "pdf_size": 1244481, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15553369630263377625&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Intelligent Knowledge Management Lab, Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan; Intelligent Knowledge Management Lab, Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan; Intelligent Knowledge Management Lab, Department of Computer Science and Information Engineering, National Cheng Kung University, Tainan, Taiwan", + "aff_domain": "gs.ncku.edu.tw;gs.ncku.edu.tw;mail.ncku.edu.tw", + "email": "gs.ncku.edu.tw;gs.ncku.edu.tw;mail.ncku.edu.tw", + "github": "https://github.com/IKMLab/R-AT", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "National Cheng Kung University", + "aff_unique_dep": "Department of Computer Science and Information 
Engineering", + "aff_unique_url": "https://www.ncku.edu.tw", + "aff_unique_abbr": "NCKU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Tainan", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "2022.emnlp-main.423", + "title": "R-TeaFor: Regularized Teacher-Forcing for Abstractive Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Teacher-forcing is widely used in training sequence generation models to improve sampling efficiency and to stabilize training. However, teacher-forcing is vulnerable to the exposure bias problem. Previous works have attempted to address exposure bias by modifying the training data to simulate model-generated results. Nevertheless, they do not consider the pairwise relationship between the original training data and the modified ones, which provides more information during training. Hence, we propose Regularized Teacher-Forcing (R-TeaFor) to utilize this relationship for better regularization. 
Empirically, our experiments show that R-TeaFor outperforms previous summarization state-of-the-art models, and the results can be generalized to different pre-trained models.", + "author": "Guan-Yu Lin; Pu-Jen Cheng", + "authorids": "/g/guan-yu-lin/; /p/pu-jen-cheng/", + "bibtex": "@inproceedings{lin-cheng-2022-r,\n title = \"{R}-{T}ea{F}or: Regularized Teacher-Forcing for Abstractive Summarization\",\n author = \"Lin, Guan-Yu and\n Cheng, Pu-Jen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.423/\",\n doi = \"10.18653/v1/2022.emnlp-main.423\",\n pages = \"6303--6311\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.423.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.423/", + "pdf_size": 2282932, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18403452449373965285&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "National Taiwan University; National Taiwan University", + "aff_domain": "ntu.edu.tw;csie.ntu.edu.tw", + "email": "ntu.edu.tw;csie.ntu.edu.tw", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "National Taiwan University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ntu.edu.tw", + "aff_unique_abbr": "NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "2022.emnlp-main.464", + "title": "R2D2: Robust Data-to-Text with Replacement Detection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Unfaithful text generation is a common problem for text generation systems. 
In the case of Data-to-Text (D2T) systems, the factuality of the generated text is particularly crucial for any real-world applications. We introduce R2D2, a training framework that addresses unfaithful Data-to-Text generation by training a system both as a generator and a faithfulness discriminator with additional replacement detection and unlikelihood learning tasks. To facilitate such training, we propose two methods for sampling unfaithful sentences. We argue that the poor entity retrieval capability of D2T systems is one of the primary sources of unfaithfulness, so in addition to the existing metrics, we further propose named entity based metrics to evaluate the fidelity of D2T generations. Our experimental results show that R2D2 systems could effectively mitigate the unfaithful text generation, and they achieve new state-of-theart results on FeTaQA, LogicNLG, and ToTTo, all with significant improvements.", + "author": "Linyong Nan; Lorenzo Jaime Flores; Yilun Zhao; Yixin Liu; Luke Benson; Weijin Zou; Dragomir Radev", + "authorids": "/l/linyong-nan/; /l/lorenzo-jaime-flores/; /y/yilun-zhao/; /y/yixin-liu/; /l/luke-benson/; /w/weijin-zou/; /d/dragomir-radev/", + "bibtex": "@inproceedings{nan-etal-2022-r2d2,\n title = \"{R}2{D}2: Robust Data-to-Text with Replacement Detection\",\n author = \"Nan, Linyong and\n Flores, Lorenzo Jaime and\n Zhao, Yilun and\n Liu, Yixin and\n Benson, Luke and\n Zou, Weijin and\n Radev, Dragomir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.464/\",\n doi = \"10.18653/v1/2022.emnlp-main.464\",\n pages = \"6903--6917\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.464.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.464/", + "pdf_size": 706040, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16184315292744402266&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Yale University; Yale University; Yale University; ; ; ; ", + "aff_domain": "yale.edu;yale.edu;yale.edu; ; ; ; ", + "email": "yale.edu;yale.edu;yale.edu; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Yale University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.yale.edu", + "aff_unique_abbr": "Yale", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.204", + "title": "R2F: A General Retrieval, Reading and Fusion Framework for Document-level Natural Language Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Document-level natural language inference (DOCNLI) is a new challenging task in natural language processing, aiming at judging the entailment relationship between a pair of hypothesis and premise documents. Current datasets and baselines largely follow sentence-level settings, but fail to address the issues raised by longer documents. In this paper, we establish a general solution, named Retrieval, Reading and Fusion (R2F) framework, and a new setting, by analyzing the main challenges of DOCNLI: interpretability, long-range dependency, and cross-sentence inference. The basic idea of the framework is to simplify document-level task into a set of sentence-level tasks, and improve both performance and interpretability with the power of evidence. For each hypothesis sentence, the framework retrieves evidence sentences from the premise, and reads to estimate its credibility. Then the sentence-level results are fused to judge the relationship between the documents. 
For the setting, we contribute complementary evidence and entailment label annotation on hypothesis sentences, for interpretability study. Our experimental results show that R2F framework can obtain state-of-the-art performance and is robust for diverse evidence retrieval methods. Moreover, it can give more interpretable prediction results. Our model and code are released at https://github.com/phoenixsecularbird/R2F.", + "author": "Hao Wang; Yixin Cao; Yangguang Li; Zhen Huang; Kun Wang; Jing Shao", + "authorids": "/h/hao-wang/; /y/yixin-cao/; /y/yangguang-li/; /z/zhen-huang/; /k/kun-wang/; /j/jing-shao/", + "bibtex": "@inproceedings{wang-etal-2022-r2f,\n title = \"{R}2{F}: A General Retrieval, Reading and Fusion Framework for Document-level Natural Language Inference\",\n author = \"Wang, Hao and\n Cao, Yixin and\n Li, Yangguang and\n Huang, Zhen and\n Wang, Kun and\n Shao, Jing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.204/\",\n doi = \"10.18653/v1/2022.emnlp-main.204\",\n pages = \"3122--3134\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.204.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.204/", + "pdf_size": 908835, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6822433425823967673&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "National University of Defense Technology; Singapore Management University+SenseTime; SenseTime; National University of Defense Technology; SenseTime; SenseTime", + "aff_domain": "nudt.edu.cn;smu.edu.sg;sensetime.com;nudt.edu.cn;sensetime.com;sensetime.com", + "email": 
"nudt.edu.cn;smu.edu.sg;sensetime.com;nudt.edu.cn;sensetime.com;sensetime.com", + "github": "https://github.com/phoenixsecularbird/R2F", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+2;2;0;2;2", + "aff_unique_norm": "National University of Defense Technology;Singapore Management University;SenseTime", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.nudt.edu.cn/;https://www.smu.edu.sg;https://www.sensetime.com", + "aff_unique_abbr": "NUDT;SMU;SenseTime", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+0;0;0;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.372", + "title": "RACE: Retrieval-augmented Commit Message Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Commit messages are important for software development and maintenance. Many neural network-based approaches have been proposed and shown promising results on automatic commit message generation. However, the generated commit messages could be repetitive or redundant. In this paper, we propose RACE, a new retrieval-augmented neural commit message generation method, which treats the retrieved similar commit as an exemplar and leverages it to generate an accurate commit message. As the retrieved commit message may not always accurately describe the content/intent of the current code diff, we also propose an exemplar guider, which learns the semantic similarity between the retrieved and current code diff and then guides the generation of commit message based on the similarity. We conduct extensive experiments on a large public dataset with five programming languages. Experimental results show that RACE can outperform all baselines. 
Furthermore, RACE can boost the performance of existing Seq2Seq models in commit message generation.", + "author": "Ensheng Shi; Yanlin Wang; Wei Tao; Lun Du; Hongyu Zhang; Shi Han; Dongmei Zhang; Hongbin Sun", + "authorids": "/e/ensheng-shi/; /y/yanlin-wang/; /w/wei-tao/; /l/lun-du/; /h/hongyu-zhang/; /s/shi-han/; /d/dongmei-zhang/; /h/hongbin-sun/", + "bibtex": "@inproceedings{shi-etal-2022-race,\n title = \"{RACE}: Retrieval-augmented Commit Message Generation\",\n author = \"Shi, Ensheng and\n Wang, Yanlin and\n Tao, Wei and\n Du, Lun and\n Zhang, Hongyu and\n Han, Shi and\n Zhang, Dongmei and\n Sun, Hongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.372/\",\n doi = \"10.18653/v1/2022.emnlp-main.372\",\n pages = \"5520--5530\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.372.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.372/", + "pdf_size": 598856, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7111604640242968948&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/DeepSoftwareAnalytics/RACE", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.606", + "title": "RAPO: An Adaptive Ranking Paradigm for Bilingual Lexicon Induction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Bilingual lexicon induction induces the word translations by aligning independently trained word embeddings in two languages. 
Existing approaches generally focus on minimizing the distances between words in the aligned pairs, while suffering from low discriminative capability to distinguish the relative orders between positive and negative candidates. In addition, the mapping function is globally shared by all words, whose performance might be hindered by the deviations in the distributions of different languages. In this work, we propose a novel ranking-oriented induction model RAPO to learn personalized mapping function for each word. RAPO is capable of enjoying the merits from the unique characteristics of a single word and the cross-language isomorphism simultaneously. Extensive experimental results on public datasets including both rich-resource and low-resource languages demonstrate the superiority of our proposal. Our code is publicly available in https://github.com/Jlfj345wf/RAPO.", + "author": "Zhoujin Tian; Chaozhuo Li; Shuo Ren; Zhiqiang Zuo; Zengxuan Wen; Xinyue Hu; Xiao Han; Haizhen Huang; Denvy Deng; Qi Zhang; Xing Xie", + "authorids": "/z/zhoujin-tian/; /c/chaozhuo-li/; /s/shuo-ren/; /z/zhiqiang-zuo/; /z/zengxuan-wen/; /x/xinyue-hu/; /x/xiao-han/; /h/haizhen-huang/; /d/denvy-deng/; /q/qi-zhang/; /x/xing-xie/", + "bibtex": "@inproceedings{tian-etal-2022-rapo,\n title = \"{RAPO}: An Adaptive Ranking Paradigm for Bilingual Lexicon Induction\",\n author = \"Tian, Zhoujin and\n Li, Chaozhuo and\n Ren, Shuo and\n Zuo, Zhiqiang and\n Wen, Zengxuan and\n Hu, Xinyue and\n Han, Xiao and\n Huang, Haizhen and\n Deng, Denvy and\n Zhang, Qi and\n Xie, Xing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.606/\",\n doi = \"10.18653/v1/2022.emnlp-main.606\",\n pages = 
\"8870--8883\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.606.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.606/", + "pdf_size": 674701, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16681525869164574671&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Microsoft+\u2217; Microsoft+\u2020; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft; Microsoft", + "aff_domain": "gmail.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "gmail.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "https://github.com/Jlfj345wf/RAPO", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Microsoft Corporation;", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.microsoft.com;", + "aff_unique_abbr": "Microsoft;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States;" + }, + { + "id": "2022.emnlp-main.211", + "title": "RASAT: Integrating Relational Structures into Pretrained Seq2Seq Model for Text-to-SQL", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relational structures such as schema linking and schema encoding have been validated as a key component to qualitatively translating natural language into SQL queries. However, introducing these structural relations comes with prices: they often result in a specialized model structure, which largely prohibits using large pretrained models in text-to-SQL. 
To address this problem, we propose RASAT: a Transformer seq2seq architecture augmented with relation-aware self-attention that could leverage a variety of relational structures while inheriting the pretrained parameters from the T5 model effectively. Our model can incorporate almost all types of existing relations in the literature, and in addition, we propose introducing co-reference relations for the multi-turn scenario. Experimental results on three widely used text-to-SQL datasets, covering both single-turn and multi-turn scenarios, have shown that RASAT could achieve competitive results in all three benchmarks, achieving state-of-the-art execution accuracy (75.5% EX on Spider, 52.6% IEX on SParC, and 37.4% IEX on CoSQL).", + "author": "Jiexing Qi; Jingyao Tang; Ziwei He; Xiangpeng Wan; Yu Cheng; Chenghu Zhou; Xinbing Wang; Quanshi Zhang; Zhouhan Lin", + "authorids": "/j/jiexing-qi/; /j/jingyao-tang/; /z/ziwei-he/; /x/xiangpeng-wan/; /y/yu-cheng/; /c/chenghu-zhou/; /x/xinbing-wang/; /q/quanshi-zhang/; /z/zhouhan-lin/", + "bibtex": "@inproceedings{qi-etal-2022-rasat,\n title = \"{RASAT}: Integrating Relational Structures into Pretrained {S}eq2{S}eq Model for Text-to-{SQL}\",\n author = \"Qi, Jiexing and\n Tang, Jingyao and\n He, Ziwei and\n Wan, Xiangpeng and\n Cheng, Yu and\n Zhou, Chenghu and\n Wang, Xinbing and\n Zhang, Quanshi and\n Lin, Zhouhan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.211/\",\n doi = \"10.18653/v1/2022.emnlp-main.211\",\n pages = \"3215--3229\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.211.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.211/", + "pdf_size": 719148, + "gs_citation": 107, 
+ "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4695249979927787983&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; NetMind.AI and ProtagoLabs; Microsoft Research; IGSNRR, Chinese Academy of Sciences; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn; ; ; ;gmail.com", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn; ; ; ;gmail.com", + "github": "https://github.com/LUMIA-group/rasat", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;1;2;3;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University;NetMind.AI;Microsoft Corporation;Institute of Geographic Sciences and Natural Resources Research", + "aff_unique_dep": ";;Microsoft Research;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.netmind.ai;https://www.microsoft.com/en-us/research;http://www.igsnrr.cas.cn", + "aff_unique_abbr": "SJTU;NetMind;MSR;IGSNRR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1;2;0;0;0;0", + "aff_country_unique": "China;Spain;United States" + }, + { + "id": "2022.emnlp-main.180", + "title": "RED-ACE: Robust Error Detection for ASR using Confidence Embeddings", + "track": "main", + "status": "Main", + "award": false, + "abstract": "ASR Error Detection (AED) models aim to post-process the output of Automatic Speech Recognition (ASR) systems, in order to detect transcription errors. Modern approaches usually use text-based input, comprised solely of the ASR transcription hypothesis, disregarding additional signals from the ASR model. Instead, we utilize the ASR system\u2019s word-level confidence scores for improving AED performance. 
Specifically, we add an ASR Confidence Embedding (ACE) layer to the AED model\u2019s encoder, allowing us to jointly encode the confidence scores and the transcribed text into a contextualized representation. Our experiments show the benefits of ASR confidence scores for AED, their complementary effect over the textual signal, as well as the effectiveness and robustness of ACE for combining these signals. To foster further research, we publish a novel AED dataset consisting of ASR outputs on the LibriSpeech corpus with annotated transcription errors.", + "author": "Zorik Gekhman; Dina Zverinski; Jonathan Mallinson; Genady Beryozkin", + "authorids": "/z/zorik-gekhman/; /d/dina-zverinski/; /j/jonathan-mallinson/; /g/genady-beryozkin/", + "bibtex": "@inproceedings{gekhman-etal-2022-red,\n title = \"{RED}-{ACE}: Robust Error Detection for {ASR} using Confidence Embeddings\",\n author = \"Gekhman, Zorik and\n Zverinski, Dina and\n Mallinson, Jonathan and\n Beryozkin, Genady\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.180/\",\n doi = \"10.18653/v1/2022.emnlp-main.180\",\n pages = \"2800--2808\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.180.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.180/", + "pdf_size": 422541, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1416223340659849831&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com", + "github": 
"https://github.com/google-research/google-research/tree/master/red-ace", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.77", + "title": "RL with KL penalties is better viewed as Bayesian inference", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Reinforcement learning (RL) is frequently employed in fine-tuning large language models (LMs), such as GPT-3, to penalize them for undesirable features of generated sequences, such as offensiveness, social bias, harmfulness or falsehood. The RL formulation involves treating the LM as a policy and updating it to maximise the expected value of a reward function which captures human preferences, such as non-offensiveness. In this paper, we analyze challenges associated with treating a language model as an RL policy and show how avoiding those challenges requires moving beyond the RL paradigm. We start by observing that the standard RL approach is flawed as an objective for fine-tuning LMs because it leads to distribution collapse: turning the LM into a degenerate distribution. Then, we analyze KL-regularised RL, a widely used recipe for fine-tuning LMs, which additionally constrains the fine-tuned LM to stay close to its original distribution in terms of Kullback-Leibler (KL) divergence. We show that KL-regularised RL is equivalent to variational inference: approximating a Bayesian posterior which specifies how to update a prior LM to conform with evidence provided by the reward function. We argue that this Bayesian inference view of KL-regularised RL is more insightful than the typically employed RL perspective. 
The Bayesian inference view explains how KL-regularised RL avoids the distribution collapse problem and offers a first-principles derivation for its objective. While this objective happens to be equivalent to RL (with a particular choice of parametric reward), there exist other objectives for fine-tuning LMs which are no longer equivalent to RL. That observation leads to a more general point: RL is not an adequate formal framework for problems such as fine-tuning language models. These problems are best viewed as Bayesian inference: approximating a pre-defined target distribution.", + "author": "Tomasz Korbak; Ethan Perez; Christopher Buckley", + "authorids": "/t/tomasz-korbak/; /e/ethan-perez/; /c/christopher-buckley/", + "bibtex": "@inproceedings{korbak-etal-2022-rl,\n title = \"{RL} with {KL} penalties is better viewed as {B}ayesian inference\",\n author = \"Korbak, Tomasz and\n Perez, Ethan and\n Buckley, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.77/\",\n doi = \"10.18653/v1/2022.findings-emnlp.77\",\n pages = \"1083--1091\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.77.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.77/", + "pdf_size": 1048663, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4079216904361564865&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Sussex + New York University; New York University; University of Sussex", + "aff_domain": "gmail.com;nyu.edu;sussex.ac.uk", + "email": "gmail.com;nyu.edu;sussex.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;0", + "aff_unique_norm": 
"University of Sussex;New York University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.sussex.ac.uk;https://www.nyu.edu", + "aff_unique_abbr": "Sussex;NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.emnlp-main.483", + "title": "RLET: A Reinforcement Learning Based Approach for Explainable QA with Entailment Trees", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Interpreting the reasoning process from questions to answers poses a challenge in approaching explainable QA. A recently proposed structured reasoning format, entailment tree, manages to offer explicit logical deductions with entailment steps in a tree structure. To generate entailment trees, prior single pass sequence-to-sequence models lack visible internal decision probability, while stepwise approaches are supervised with extracted single step data and cannot model the tree as a whole. In this work, we propose RLET, a Reinforcement Learning based Entailment Tree generation framework, which is trained utilising the cumulative signals across the whole tree. RLET iteratively performs single step reasoning with sentence selection and deduction generation modules, from which the training signal is accumulated across the tree with elaborately designed aligned reward function that is consistent with the evaluation. To the best of our knowledge, we are the first to introduce RL into the entailment tree generation task. 
Experiments on three settings of the EntailmentBank dataset demonstrate the strength of using RL framework.", + "author": "Tengxiao Liu; Qipeng Guo; Xiangkun Hu; Yue Zhang; Xipeng Qiu; Zheng Zhang", + "authorids": "/t/tengxiao-liu/; /q/qipeng-guo/; /x/xiangkun-hu/; /y/yue-zhang/; /x/xipeng-qiu/; /z/zheng-zhang/", + "bibtex": "@inproceedings{liu-etal-2022-rlet,\n title = \"{RLET}: A Reinforcement Learning Based Approach for Explainable {QA} with Entailment Trees\",\n author = \"Liu, Tengxiao and\n Guo, Qipeng and\n Hu, Xiangkun and\n Zhang, Yue and\n Qiu, Xipeng and\n Zhang, Zheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.483/\",\n doi = \"10.18653/v1/2022.emnlp-main.483\",\n pages = \"7177--7189\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.483.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.483/", + "pdf_size": 400272, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13692897477046778271&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, Fudan University; Amazon AWS AI; Amazon AWS AI; School of Engineering, Westlake University; School of Computer Science, Fudan University; Amazon AWS AI", + "aff_domain": "m.fudan.edu.cn;amazon.com;amazon.com;westlake.edu.cn;fudan.edu.cn;amazon.com", + "email": "m.fudan.edu.cn;amazon.com;amazon.com;westlake.edu.cn;fudan.edu.cn;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;2;0;1", + "aff_unique_norm": "Fudan University;Amazon;Westlake University", + "aff_unique_dep": "School of Computer Science;Amazon Web Services AI;School of Engineering", + 
"aff_unique_url": "https://www.fudan.edu.cn;https://aws.amazon.com;https://www.westlake.edu.cn", + "aff_unique_abbr": "Fudan;AWS;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.222", + "title": "RLPrompt: Optimizing Discrete Text Prompts with Reinforcement Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompting has shown impressive success in enabling large pre-trained language models (LMs) to perform diverse NLP tasks, especially with only few downstream data. Automatically finding the optimal prompt for each task, however, is challenging. Most existing work resorts to tuning *soft* prompts (e.g., embeddings) which fall short of interpretability, reusability across LMs, and applicability when gradients are not accessible. *Discrete* prompts, on the other hand, are difficult to optimize, and are often created by \u201cenumeration (e.g., paraphrasing)-then-selection\u201d heuristics that do not explore the prompt space systematically. This paper proposes RLPrompt, an efficient discrete prompt optimization approach with reinforcement learning (RL). RLPrompt formulates a parameter-efficient policy network that generates the optimized discrete prompt after training with reward. To harness the complex and stochastic reward signals from the large LM environment, we incorporate effective reward stabilization that substantially enhances training efficiency. RLPrompt is flexibly applicable to different types of LMs, such as masked (e.g., BERT) and left-to-right models (e.g., GPTs), for both classification and generation tasks. Experiments on few-shot classification and unsupervised text style transfer show superior performance over a wide range of existing fine-tuning or prompting methods. 
Interestingly, the resulting optimized prompts are often ungrammatical gibberish text; and surprisingly, those gibberish prompts are transferrable between different LMs to retain significant performance, indicating that LM prompting may not follow human language patterns.", + "author": "Mingkai Deng; Jianyu Wang; Cheng-Ping Hsieh; Yihan Wang; Han Guo; Tianmin Shu; Meng Song; Eric Xing; Zhiting Hu", + "authorids": "/m/mingkai-deng/; /j/jianyu-wang/; /c/cheng-ping-hsieh/; /y/yihan-wang/; /h/han-guo/; /t/tianmin-shu/; /m/meng-song/; /e/eric-xing/; /z/zhiting-hu/", + "bibtex": "@inproceedings{deng-etal-2022-rlprompt,\n title = \"{RLP}rompt: Optimizing Discrete Text Prompts with Reinforcement Learning\",\n author = \"Deng, Mingkai and\n Wang, Jianyu and\n Hsieh, Cheng-Ping and\n Wang, Yihan and\n Guo, Han and\n Shu, Tianmin and\n Song, Meng and\n Xing, Eric and\n Hu, Zhiting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.222/\",\n doi = \"10.18653/v1/2022.emnlp-main.222\",\n pages = \"3369--3391\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.222.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.222/", + "pdf_size": 1049088, + "gs_citation": 362, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=532139784671626065&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff": "Carnegie Mellon University; UC San Diego; UC San Diego; UC San Diego; Carnegie Mellon University; MIT; UC San Diego; Carnegie Mellon University+Mohamed bin Zayed University of Artificial Intelligence+Petuum Inc.; UC San Diego", + "aff_domain": "cs.cmu.edu;ucsd.edu;ucsd.edu;ucsd.edu;cs.cmu.edu; ; ; ;ucsd.edu", + "email": 
"cs.cmu.edu;ucsd.edu;ucsd.edu;ucsd.edu;cs.cmu.edu; ; ; ;ucsd.edu", + "github": "https://github.com/mingkaid/rl-prompt", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;0;2;1;0+3+4;1", + "aff_unique_norm": "Carnegie Mellon University;University of California, San Diego;Massachusetts Institute of Technology;Mohamed bin Zayed University of Artificial Intelligence;Petuum Inc.", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.cmu.edu;https://www.ucsd.edu;https://web.mit.edu;https://www.mbzuai.ac.ae;https://www.petuum.com", + "aff_unique_abbr": "CMU;UCSD;MIT;MBZUAI;", + "aff_campus_unique_index": "1;1;1;1;;1", + "aff_campus_unique": ";San Diego", + "aff_country_unique_index": "0;0;0;0;0;0;0;0+1+0;0", + "aff_country_unique": "United States;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.186", + "title": "ROSE: Robust Selective Fine-tuning for Pre-trained Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Even though the large-scale language models have achieved excellent performances, they suffer from various adversarial attacks.A large body of defense methods has been proposed. 
However, they are still limited due to redundant attack search spaces and the inability to defend against various types of attacks. In this work, we present a novel fine-tuning approach called RObust SElective fine-tuning (ROSE) to address this issue. ROSE conducts selective updates when adapting pre-trained models to downstream tasks, filtering out invaluable and unrobust updates of parameters. Specifically, we propose two strategies: the first-order and second-order ROSE for selecting target robust parameters. The experimental results show that ROSE achieves significant improvements in adversarial robustness on various downstream NLP tasks, and the ensemble method even surpasses both variants above. Furthermore, ROSE can be easily incorporated into existing fine-tuning methods to improve their adversarial robustness further. The empirical analysis confirms that ROSE eliminates unrobust spurious updates during fine-tuning, leading to solutions corresponding to flatter and wider optima than the conventional method. Code is available at https://github.com/jiangllan/ROSE.", + "author": "Lan Jiang; Hao Zhou; Yankai Lin; Peng Li; Jie Zhou; Rui Jiang", + "authorids": "/l/lan-jiang/; /h/hao-zhou/; /y/yankai-lin/; /p/peng-li/; /j/jie-zhou/; /r/rui-jiang/", + "bibtex": "@inproceedings{jiang-etal-2022-rose,\n title = \"{ROSE}: Robust Selective Fine-tuning for Pre-trained Language Models\",\n author = \"Jiang, Lan and\n Zhou, Hao and\n Lin, Yankai and\n Li, Peng and\n Zhou, Jie and\n Jiang, Rui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.186/\",\n doi = \"10.18653/v1/2022.emnlp-main.186\",\n pages = \"2886--2897\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.186.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.186/", + "pdf_size": 794226, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12796582111234696340&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Laboratory of Bioinformatics, Center for Synthetic and Systems Biology, Department of Automation, BNRist, Tsinghua University, China; Pattern Recognition Center, WeChat AI, Tencent Inc., China; Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China+Beijing Key Laboratory of Big Data Management and Analysis Methods, Beijing, China; Institute for AI Industry Research (AIR), Tsinghua University, China; Pattern Recognition Center, WeChat AI, Tencent Inc., China; MOE Key Laboratory of Bioinformatics, Center for Synthetic and Systems Biology, Department of Automation, BNRist, Tsinghua University, China", + "aff_domain": "mails.tsinghua.edu.cn; ; ; ; ; ", + "email": "mails.tsinghua.edu.cn; ; ; ; ; ", + "github": "https://github.com/jiangllan/ROSE", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2+3;0;1;0", + "aff_unique_norm": "Tsinghua University;Tencent Inc.;Renmin University of China;Beijing Key Laboratory of Big Data Management and Analysis Methods", + "aff_unique_dep": "Department of Automation;Pattern Recognition Center, WeChat AI;Gaoling School of Artificial Intelligence;Big Data Management and Analysis", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.tencent.com;http://www.ruc.edu.cn;", + "aff_unique_abbr": "THU;Tencent;RUC;", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.221", + "title": "RaP: Redundancy-aware Video-language Pre-training for Text-Video Retrieval", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Video language 
pre-training methods have mainly adopted sparse sampling techniques to alleviate the temporal redundancy of videos. Though effective, sparse sampling still suffers inter-modal redundancy: visual redundancy and textual redundancy. Compared with highly generalized text, sparsely sampled frames usually contain text-independent portions, called visual redundancy. Sparse sampling is also likely to miss important frames corresponding to some text portions, resulting in textual redundancy. Inter-modal redundancy leads to a mismatch of video and text information, hindering the model from better learning the shared semantics across modalities. To alleviate it, we propose Redundancy-aware Video-language Pre-training. We design a redundancy measurement of video patches and text tokens by calculating the cross-modal minimum dis-similarity. Then, we penalize the high-redundant video patches and text tokens through a proposed redundancy-aware contrastive learning. We evaluate our method on four benchmark datasets, MSRVTT, MSVD, DiDeMo, and LSMDC, achieving a significant improvement over the previous state-of-the-art results.", + "author": "Xing Wu; Chaochen Gao; Zijia Lin; Zhongyuan Wang; Jizhong Han; Songlin Hu", + "authorids": "/x/xing-wu/; /c/chaochen-gao/; /z/zijia-lin/; /z/zhongyuan-wang/; /j/jizhong-han/; /s/songlin-hu/", + "bibtex": "@inproceedings{wu-etal-2022-rap,\n title = \"{R}a{P}: Redundancy-aware Video-language Pre-training for Text-Video Retrieval\",\n author = \"Wu, Xing and\n Gao, Chaochen and\n Lin, Zijia and\n Wang, Zhongyuan and\n Han, Jizhong and\n Hu, Songlin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.221/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.221\",\n pages = \"3036--3047\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.221.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.221/", + "pdf_size": 3519278, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1806975363145658539&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/caskcsg/VLP/tree/main/RaP", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.611", + "title": "Rainier: Reinforced Knowledge Introspector for Commonsense Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge underpins reasoning. Recent research demonstrates that when relevant knowledge is provided as additional context to commonsense question answering (QA), it can substantially enhance the performance even on top of state-of-the-art. The fundamental challenge is where and how to find such knowledge that is high quality and on point with respect to the question; knowledge retrieved from knowledge bases are incomplete and knowledge generated from language models are inconsistent.We present Rainier, or Reinforced Knowledge Introspector, that learns to generate contextually relevant knowledge in response to given questions. Our approach starts by imitating knowledge generated by GPT-3, then learns to generate its own knowledge via reinforcement learning where rewards are shaped based on the increased performance on the resulting question answering. Rainier demonstrates substantial and consistent performance gains when tested over 9 different commonsense benchmarks: including 5 datasets that are seen during model training, as well as 4 datasets that are kept unseen. 
Our work is the first to report that knowledge generated by models that are orders of magnitude smaller than GPT-3, even without direct supervision on the knowledge itself, can exceed the quality of commonsense knowledge elicited from GPT-3.", + "author": "Jiacheng Liu; Skyler Hallinan; Ximing Lu; Pengfei He; Sean Welleck; Hannaneh Hajishirzi; Yejin Choi", + "authorids": "/j/jiacheng-liu/; /s/skyler-hallinan/; /x/ximing-lu/; /p/pengfei-he/; /s/sean-welleck/; /h/hannaneh-hajishirzi/; /y/yejin-choi/", + "bibtex": "@inproceedings{liu-etal-2022-rainier,\n title = \"Rainier: Reinforced Knowledge Introspector for Commonsense Question Answering\",\n author = \"Liu, Jiacheng and\n Hallinan, Skyler and\n Lu, Ximing and\n He, Pengfei and\n Welleck, Sean and\n Hajishirzi, Hannaneh and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.611/\",\n doi = \"10.18653/v1/2022.emnlp-main.611\",\n pages = \"8938--8958\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.611.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.611/", + "pdf_size": 1299362, + "gs_citation": 60, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15482940092094393595&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661; Allen Institute for Artificial Intelligence\u2660; Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2660; Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2660; Paul G. 
Allen School of Computer Science & Engineering, University of Washington\u2661\u2660; Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2660; Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2660", + "aff_domain": "cs.washington.edu; ; ; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ; ; ", + "github": "http://github.com/liujch1998/rainier", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;0;0;0", + "aff_unique_norm": "University of Washington;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;", + "aff_unique_url": "https://www.cs.washington.edu;https://allenai.org", + "aff_unique_abbr": "UW;AI2", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.15", + "title": "RankGen: Improving Text Generation with Large Ranking Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Given an input sequence (or prefix), modern language models often assign high probabilities to output sequences that are repetitive, incoherent, or irrelevant to the prefix; as such, model-generated text also contains such artifacts. To address these issues we present RankGen, a 1.2B parameter encoder model for English that scores model generations given a prefix. RankGen can be flexibly incorporated as a scoring function in beam search and used to decode from any pretrained language model. We train RankGen using large-scale contrastive learning to map a prefix close to the ground-truth sequence that follows it and far away from two types of negatives: (1) random sequences from the same document as the prefix, and (2) sequences generated from a large language model conditioned on the prefix. 
Experiments across four different language models (345M-11B parameters) and two domains show that RankGen significantly outperforms decoding algorithms like nucleus, top-k, and typical sampling on both automatic metrics (85.0 vs 77.3 MAUVE) as well as human evaluations with English writers (74.5% human preference over nucleus sampling). Analysis reveals that RankGen outputs are more relevant to the prefix and improve continuity and coherence compared to baselines. We release our model checkpoints, code, and human preference data with explanations to facilitate future research.", + "author": "Kalpesh Krishna; Yapei Chang; John Wieting; Mohit Iyyer", + "authorids": "/k/kalpesh-krishna/; /y/yapei-chang/; /j/john-wieting/; /m/mohit-iyyer/", + "bibtex": "@inproceedings{krishna-etal-2022-rankgen,\n title = \"{R}ank{G}en: Improving Text Generation with Large Ranking Models\",\n author = \"Krishna, Kalpesh and\n Chang, Yapei and\n Wieting, John and\n Iyyer, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.15/\",\n doi = \"10.18653/v1/2022.emnlp-main.15\",\n pages = \"199--232\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.15.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.15/", + "pdf_size": 1117486, + "gs_citation": 65, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7778190135048367822&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Massachusetts Amherst; University of Massachusetts Amherst; Google Research; University of Massachusetts Amherst", + "aff_domain": "cs.umass.edu;cs.umass.edu;google.com;cs.umass.edu", + "email": 
"cs.umass.edu;cs.umass.edu;google.com;cs.umass.edu", + "github": "https://github.com/martiansideofthemoon/rankgen", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "University of Massachusetts Amherst;Google", + "aff_unique_dep": ";Google Research", + "aff_unique_url": "https://www.umass.edu;https://research.google", + "aff_unique_abbr": "UMass Amherst;Google Research", + "aff_campus_unique_index": "0;0;1;0", + "aff_campus_unique": "Amherst;Mountain View", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.204", + "title": "Re-Examining Calibration: The Case of Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "For users to trust model predictions, they need to understand model outputs, particularly their confidence \u2014 calibration aims to adjust (calibrate) models\u2019 confidence to match expected accuracy. We argue that the traditional calibration evaluation does not promote effective calibrations: for example, it can encourage always assigning a mediocre confidence score to all predictions, which does not help users distinguish correct predictions from wrong ones. Building on those observations, we propose a new calibration metric, MacroCE, that better captures whether the model assigns low confidence to wrong predictions and high confidence to correct predictions. Focusing on the practical application of open-domain question answering, we examine conventional calibration methods applied on the widely-used retriever-reader pipeline, all of which do not bring significant gains under our new MacroCE metric. Toward better calibration, we propose a new calibration method (ConsCal) that uses not just final model predictions but whether multiple model checkpoints make consistent predictions. 
Altogether, we provide an alternative view of calibration along with a new metric, re-evaluation of existing calibration methods on our metric, and proposal of a more effective calibration method.", + "author": "Chenglei Si; Chen Zhao; Sewon Min; Jordan Boyd-Graber", + "authorids": "/c/chenglei-si/; /c/chen-zhao/; /s/sewon-min/; /j/jordan-boyd-graber/", + "bibtex": "@inproceedings{si-etal-2022-examining,\n title = \"Re-Examining Calibration: The Case of Question Answering\",\n author = \"Si, Chenglei and\n Zhao, Chen and\n Min, Sewon and\n Boyd-Graber, Jordan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.204/\",\n doi = \"10.18653/v1/2022.findings-emnlp.204\",\n pages = \"2814--2829\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.204.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.204/", + "pdf_size": 894820, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1352613596661052692&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 6, + "aff": "University of Maryland; New York University; University of Washington; University of Maryland", + "aff_domain": "umd.edu;nyu.edu;cs.washington.edu;umiacs.umd.edu", + "email": "umd.edu;nyu.edu;cs.washington.edu;umiacs.umd.edu", + "github": "https://github.com/NoviScl/calibrateQA", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Maryland;New York University;University of Washington", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.umd.edu;https://www.nyu.edu;https://www.washington.edu", + "aff_unique_abbr": "UMD;NYU;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.296", + "title": "Re3: Generating Longer Stories With Recursive Reprompting and Revision", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We consider the problem of automatically generating longer stories of over two thousand words. Compared to prior work on shorter stories, long-range plot coherence and relevance are more central challenges here. We propose the Recursive Reprompting and Revision framework (Re3) to address these challenges by (a) prompting a general-purpose language model to construct a structured overarching plan, and (b) generating story passages by repeatedly injecting contextual information from both the plan and current story state into a language model prompt. We then revise by (c) reranking different continuations for plot coherence and premise relevance, and finally (d) editing the best continuation for factual consistency. Compared to similar-length stories generated directly from the same base model, human evaluators judged substantially more of Re3\u2019s stories as having a coherent overarching plot (by 14% absolute increase), and relevant to the given initial premise (by 20%).", + "author": "Kevin Yang; Yuandong Tian; Nanyun Peng; Dan Klein", + "authorids": "/k/kevin-yang/; /y/yuandong-tian/; /n/nanyun-peng/; /d/dan-klein/", + "bibtex": "@inproceedings{yang-etal-2022-re3,\n title = \"Re3: Generating Longer Stories With Recursive Reprompting and Revision\",\n author = \"Yang, Kevin and\n Tian, Yuandong and\n Peng, Nanyun and\n Klein, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.296/\",\n doi = \"10.18653/v1/2022.emnlp-main.296\",\n pages = \"4393--4479\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.296.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.296/", + "pdf_size": 1477915, + "gs_citation": 188, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12913281204520727323&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "UC Berkeley; Meta AI; UCLA; UC Berkeley", + "aff_domain": "berkeley.edu;berkeley.edu;meta.com;cs.ucla.edu", + "email": "berkeley.edu;berkeley.edu;meta.com;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of California, Berkeley;Meta Platforms, Inc.;University of California, Los Angeles", + "aff_unique_dep": ";Meta AI;", + "aff_unique_url": "https://www.berkeley.edu;https://meta.com;https://www.ucla.edu", + "aff_unique_abbr": "UC Berkeley;Meta;UCLA", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Berkeley;;Los Angeles", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.431", + "title": "ReCo: Reliable Causal Chain Reasoning via Structural Causal Recurrent Neural Networks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Causal chain reasoning (CCR) is an essential ability for many decision-making AI systems, which requires the model to build reliable causal chains by connecting causal pairs. However, CCR suffers from two main transitive problems: threshold effect and scene drift. 
In other words, the causal pairs to be spliced may have a conflicting threshold boundary or scenario.To address these issues, we propose a novel Reliable Causal chain reasoning framework (ReCo), which introduces exogenous variables to represent the threshold and scene factors of each causal pair within the causal chain, and estimates the threshold and scene contradictions across exogenous variables via structural causal recurrent neural networks (SRNN). Experiments show that ReCo outperforms a series of strong baselines on both Chinese and English CCR datasets. Moreover, by injecting reliable causal chain knowledge distilled by ReCo, BERT can achieve better performances on four downstream causal-related tasks than BERT models enhanced by other kinds of knowledge.", + "author": "Kai Xiong; Xiao Ding; Zhongyang Li; Li Du; Ting Liu; Bing Qin; Yi Zheng; Baoxing Huai", + "authorids": "/k/kai-xiong/; /x/xiao-ding/; /z/zhongyang-li/; /l/li-du/; /t/ting-liu/; /b/bing-qin/; /y/yi-zheng/; /b/baoxing-huai/", + "bibtex": "@inproceedings{xiong-etal-2022-reco,\n title = \"{R}e{C}o: Reliable Causal Chain Reasoning via Structural Causal Recurrent Neural Networks\",\n author = \"Xiong, Kai and\n Ding, Xiao and\n Li, Zhongyang and\n Du, Li and\n Liu, Ting and\n Qin, Bing and\n Zheng, Yi and\n Huai, Baoxing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.431/\",\n doi = \"10.18653/v1/2022.emnlp-main.431\",\n pages = \"6426--6438\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.431.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.431/", + "pdf_size": 1307351, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=9036193698768731624&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Huawei Cloud, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Huawei Cloud, China; Huawei Cloud, China", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;huawei.com", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;0;0;0;1;1", + "aff_unique_norm": "Harbin Institute of Technology;Huawei Cloud", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval;", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.huaweicloud.com", + "aff_unique_abbr": "HIT;Huawei Cloud", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.46", + "title": "ReSel: N-ary Relation Extraction from Scientific Text and Tables by Learning to Retrieve and Select", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study the problem of extracting N-ary relation tuples from scientific articles. This task is challenging because the target knowledge tuples can reside in multiple parts and modalities of the document. 
Our proposed method ReSel decomposes this task into a two-stage procedure that first retrieves the most relevant paragraph/table and then selects the target entity from the retrieved component. For the high-level retrieval stage, ReSel designs a simple and effective feature set, which captures multi-level lexical and semantic similarities between the query and components. For the low-level selection stage, ReSel designs a cross-modal entity correlation graph along with a multi-view architecture, which models both semantic and document-structural relations between entities. Our experiments on three scientific information extraction datasets show that ReSel outperforms state-of-the-art baselines significantly.", + "author": "Yuchen Zhuang; Yinghao Li; Junyang Zhang; Yue Yu; Yingjun Mou; Xiang Chen; Le Song; Chao Zhang", + "authorids": "/y/yuchen-zhuang/; /y/yinghao-li/; /j/junyang-zhang/; /y/yue-yu/; /y/yingjun-mou/; /x/xiang-chen/; /l/le-song/; /c/chao-zhang-tu/", + "bibtex": "@inproceedings{zhuang-etal-2022-resel,\n title = \"{R}e{S}el: N-ary Relation Extraction from Scientific Text and Tables by Learning to Retrieve and Select\",\n author = \"Zhuang, Yuchen and\n Li, Yinghao and\n Zhang, Junyang and\n Yu, Yue and\n Mou, Yingjun and\n Chen, Xiang and\n Song, Le and\n Zhang, Chao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.46/\",\n doi = \"10.18653/v1/2022.emnlp-main.46\",\n pages = \"730--744\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.46.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.46/", + "pdf_size": 1377392, + "gs_citation": 14, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=1818525249314855671&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Adobe Research; MBZUAI; BioMap", + "aff_domain": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;adobe.com;mbzuai.ac.ae;gatech.edu", + "email": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;adobe.com;mbzuai.ac.ae;gatech.edu", + "github": "https://github.com/night-chen/ReSel", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1;2;3", + "aff_unique_norm": "Georgia Institute of Technology;Adobe;Mohamed Bin Zayed University of Artificial Intelligence;BioMap", + "aff_unique_dep": ";Adobe Research;;", + "aff_unique_url": "https://www.gatech.edu;https://research.adobe.com;https://www.mbzuai.ac.ae;", + "aff_unique_abbr": "Georgia Tech;Adobe;MBZUAI;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;1", + "aff_country_unique": "United States;United Arab Emirates;" + }, + { + "id": "2022.findings-emnlp.181", + "title": "ReaRev: Adaptive Reasoning for Question Answering over Knowledge Graphs", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge Graph Question Answering (KGQA) involves retrieving entities as answers from a Knowledge Graph (KG) using natural language queries. The challenge is to learn to reason over question-relevant KG facts that traverse KG entities and lead to the question answers. To facilitate reasoning, the question is decoded into instructions, which are dense question representations used to guide the KG traversals. 
However, if the derived instructions do not exactly match the underlying KG information, they may lead to reasoning under irrelevant context.Our method, termed ReaRev, introduces a new way to KGQA reasoning with respectto both instruction decoding and execution. To improve instruction decoding, we perform reasoning in an adaptive manner, where KG-aware information is used to iteratively update the initial instructions. To improve instruction execution, we emulate breadth-first search (BFS) with graph neural networks (GNNs). The BFS strategy treats the instructions as a set and allows our method to decide on their execution order on the fly. Experimental results on three KGQA benchmarks demonstrate the ReaRev\u2019s effectiveness compared with previous state-of-the-art, especially when the KG is incomplete or when we tackle complex questions. Our code is publicly available at https://github.com/cmavro/ReaRev_KGQA.", + "author": "Costas Mavromatis; George Karypis", + "authorids": "/c/costas-mavromatis/; /g/george-karypis/", + "bibtex": "@inproceedings{mavromatis-karypis-2022-rearev,\n title = \"{R}ea{R}ev: Adaptive Reasoning for Question Answering over Knowledge Graphs\",\n author = \"Mavromatis, Costas and\n Karypis, George\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.181/\",\n doi = \"10.18653/v1/2022.findings-emnlp.181\",\n pages = \"2447--2458\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.181.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.181/", + "pdf_size": 314130, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5833946652098679653&as_sdt=5,33&sciodt=0,33&hl=en", + 
"gs_version_total": 5, + "aff": "University of Minnesota, USA; University of Minnesota, USA", + "aff_domain": "umn.edu;umn.edu", + "email": "umn.edu;umn.edu", + "github": "https://github.com/cmavro/ReaRev_KGQA", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Minnesota", + "aff_unique_dep": "", + "aff_unique_url": "https://www.minnesota.edu", + "aff_unique_abbr": "UMN", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.343", + "title": "Readability Controllable Biomedical Document Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Different from general documents, it is recognised that the ease with which people can understand a biomedical text is eminently varied, owing to the highly technical nature of biomedical documents and the variance of readers\u2019 domain knowledge. 
However, existing biomedical document summarization systems have paid little attention to readability control, leaving users with summaries that are incompatible with their levels of expertise.In recognition of this urgent demand, we introduce a new task of readability controllable summarization for biomedical documents, which aims to recognise users\u2019 readability demands and generate summaries that better suit their needs: technical summaries for experts and plain language summaries (PLS) for laymen.To establish this task, we construct a corpus consisting of biomedical papers with technical summaries and PLSs written by the authors, and benchmark multiple advanced controllable abstractive and extractive summarization models based on pre-trained language models (PLMs) with prevalent controlling and generation techniques.Moreover, we propose a novel masked language model (MLM) based metric and its variant to effectively evaluate the readability discrepancy between lay and technical summaries.Experimental results from automated and human evaluations show that though current control techniques allow for a certain degree of readability adjustment during generation, the performance of existing controllable summarization methods is far from desirable in this task.", + "author": "Zheheng Luo; Qianqian Xie; Sophia Ananiadou", + "authorids": "/z/zheheng-luo/; /q/qianqian-xie/; /s/sophia-ananiadou/", + "bibtex": "@inproceedings{luo-etal-2022-readability,\n title = \"Readability Controllable Biomedical Document Summarization\",\n author = \"Luo, Zheheng and\n Xie, Qianqian and\n Ananiadou, Sophia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.343/\",\n doi = 
\"10.18653/v1/2022.findings-emnlp.343\",\n pages = \"4667--4680\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.343.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.343/", + "pdf_size": 600119, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3505169380471254265&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "NaCTeM, The University of Manchester; NaCTeM, The University of Manchester; NaCTeM, The University of Manchester", + "aff_domain": "manchester.ac.uk;manchester.ac.uk;manchester.ac.uk", + "email": "manchester.ac.uk;manchester.ac.uk;manchester.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The University of Manchester", + "aff_unique_dep": "NaCTeM", + "aff_unique_url": "https://www.manchester.ac.uk", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.324", + "title": "Realistic Data Augmentation Framework for Enhancing Tabular Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Existing approaches to constructing training data for Natural Language Inference (NLI) tasks, such as for semi-structured table reasoning, are either via crowdsourcing or fully automatic methods. However, the former is expensive and time consuming and thus limits scale, and the latter often produces naive examples that may lack complex reasoning. This paper develops a realistic semi-automated framework for data augmentation for tabular inference. Instead of manually generating a hypothesis for each table, our methodology generates hypothesis templates transferable to similar tables. In addition, our framework entails the creation of rational counterfactual tables based on human written logical constraints and premise paraphrasing. 
For our case study, we use the INFOTABS (Gupta et al., 2020), which is an entity centric tabular inference dataset. We observed that our framework could generate human-like tabular inference examples, which could benefit training data augmentation, especially in the scenario with limited supervision.", + "author": "Dibyakanti Kumar; Vivek Gupta; Soumya Sharma; Shuo Zhang", + "authorids": "/d/dibyakanti-kumar/; /v/vivek-gupta/; /s/soumya-sharma/; /s/shuo-zhang/", + "bibtex": "@inproceedings{kumar-etal-2022-realistic,\n title = \"Realistic Data Augmentation Framework for Enhancing Tabular Reasoning\",\n author = \"Kumar, Dibyakanti and\n Gupta, Vivek and\n Sharma, Soumya and\n Zhang, Shuo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.324/\",\n doi = \"10.18653/v1/2022.findings-emnlp.324\",\n pages = \"4411--4429\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.324.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.324/", + "pdf_size": 643114, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13112767405526460001&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "IIT Guwahati; University of Utah; IIT Kharagpur; Bloomberg", + "aff_domain": "iitg.ac.in;cs.utah.edu;gmail.com;bloomberg.net", + "email": "iitg.ac.in;cs.utah.edu;gmail.com;bloomberg.net", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Indian Institute of Technology Guwahati;University of Utah;Indian Institute of Technology Kharagpur;Bloomberg", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.iitg.ac.in;https://www.utah.edu;https://www.iitkgp.ac.in;https://www.bloomberg.com", + "aff_unique_abbr": "IITG;Utah;IIT KGP;Bloomberg", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Guwahati;;Kharagpur", + "aff_country_unique_index": "0;1;0;1", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.615", + "title": "ReasTAP: Injecting Table Reasoning Skills During Pre-training via Synthetic Reasoning Examples", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reasoning over tabular data requires both table structure understanding and a broad set of table reasoning skills. Current models with table-specific architectures and pre-training methods perform well on understanding table structures, but they still struggle with tasks that require various table reasoning skills. In this work, we develop ReasTAP to show that high-level table reasoning skills can be injected into models during pre-training without a complex table-specific architecture design. We define 7 table reasoning skills, such as numerical operation, temporal comparison, and conjunction. Each reasoning skill is associated with one example generator, which synthesizes questions over semi-structured tables according to the sampled templates. We model the table pre-training task as a sequence generation task and pre-train ReasTAP to generate precise answers of the synthetic examples. ReasTAP is evaluated on four benchmarks covering three downstream tasks including 1) WikiSQL-Weak and WikiTQ for Table Question Answering, 2) TabFact for Table Fact Verification, and 3) LogicNLG for Faithful Table-to-Text Generation. Experimental results demonstrate that ReasTAP achieves new state-of-the-art results on all of them and delivers a significant improvement under low-resource setting. 
Our code is publicly available at https://github.com/Yale-LILY/ReasTAP.", + "author": "Yilun Zhao; Linyong Nan; Zhenting Qi; Rui Zhang; Dragomir Radev", + "authorids": "/y/yilun-zhao/; /l/linyong-nan/; /z/zhenting-qi/; /r/rui-zhang/; /d/dragomir-radev/", + "bibtex": "@inproceedings{zhao-etal-2022-reastap,\n title = \"{R}eas{TAP}: Injecting Table Reasoning Skills During Pre-training via Synthetic Reasoning Examples\",\n author = \"Zhao, Yilun and\n Nan, Linyong and\n Qi, Zhenting and\n Zhang, Rui and\n Radev, Dragomir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.615/\",\n doi = \"10.18653/v1/2022.emnlp-main.615\",\n pages = \"9006--9018\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.615.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.615/", + "pdf_size": 514027, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1081496259562949258&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Yale University; Yale University; Zhejiang University; Penn State University; Yale University", + "aff_domain": "yale.edu;yale.edu; ; ;yale.edu", + "email": "yale.edu;yale.edu; ; ;yale.edu", + "github": "https://github.com/Yale-LILY/ReasTAP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;0", + "aff_unique_norm": "Yale University;Zhejiang University;Penn State University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.yale.edu;https://www.zju.edu.cn;https://www.psu.edu", + "aff_unique_abbr": "Yale;ZJU;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0;0", + "aff_country_unique": "United States;China" + }, + 
{ + "id": "2022.findings-emnlp.527", + "title": "Reason first, then respond: Modular Generation for Knowledge-infused Dialogue", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large language models can produce fluent dialogue but often hallucinate factual inaccuracies. While retrieval-augmented models help alleviate this issue, they still face a difficult challenge of both reasoning to provide correct knowledge and generating conversation simultaneously. In this work, we propose a modular model, Knowledge to Response (K2R), for incorporating knowledge into conversational agents, which breaks down this problem into two easier steps. K2R first generates a knowledge sequence, given a dialogue context, as an intermediate step. After this \u201creasoning step\u201d, the model then attends to its own generated knowledge sequence, as well as the dialogue context, to produce a final response. In detailed experiments, we find that such a model hallucinates less in knowledge-grounded dialogue tasks, and has advantages in terms of interpretability and modularity. 
In particular, it can be used to fuse QA and dialogue systems together to enable dialogue agents to give knowledgeable answers, or QA models to give conversational responses in a zero-shot setting.", + "author": "Leonard Adolphs; Kurt Shuster; Jack Urbanek; Arthur Szlam; Jason Weston", + "authorids": "/l/leonard-adolphs/; /k/kurt-shuster/; /j/jack-urbanek/; /a/arthur-szlam/; /j/jason-weston/", + "bibtex": "@inproceedings{adolphs-etal-2022-reason,\n title = \"Reason first, then respond: Modular Generation for Knowledge-infused Dialogue\",\n author = \"Adolphs, Leonard and\n Shuster, Kurt and\n Urbanek, Jack and\n Szlam, Arthur and\n Weston, Jason\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.527/\",\n doi = \"10.18653/v1/2022.findings-emnlp.527\",\n pages = \"7112--7132\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.527.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.527/", + "pdf_size": 830589, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6478942389767207025&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "ETH Z\u00fcrich; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": "inf.ethz.ch; ; ; ; ", + "email": "inf.ethz.ch; ; ; ; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "ETH Z\u00fcrich;Meta Platforms, Inc.", + "aff_unique_dep": ";Meta AI", + "aff_unique_url": "https://www.ethz.ch;https://meta.com", + "aff_unique_abbr": "ETHZ;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "Switzerland;United States" + }, + { + "id": 
"2022.emnlp-main.48", + "title": "Reasoning Like Program Executors", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reasoning over natural language is a long-standing goal for the research community. However, studies have shown that existing language models are inadequate in reasoning. To address the issue, we present POET, a novel reasoning pre-training paradigm. Through pre-training language models with programs and their execution results, POET empowers language models to harvest the reasoning knowledge possessed by program executors via a data-driven approach. POET is conceptually simple and can be instantiated by different kinds of program executors. In this paper, we showcase two simple instances POET-Math and POET-Logic, in addition to a complex instance, POET-SQL. Experimental results on six benchmarks demonstrate that POET can significantly boost model performance in natural language reasoning, such as numerical reasoning, logical reasoning, and multi-hop reasoning. 
POET opens a new gate on reasoning-enhancement pre-training, and we hope our analysis would shed light on the future research of reasoning like program executors.", + "author": "Xinyu Pi; Qian Liu; Bei Chen; Morteza Ziyadi; Zeqi Lin; Qiang Fu; Yan Gao; Jian-Guang Lou; Weizhu Chen", + "authorids": "/x/xinyu-pi/; /q/qian-liu/; /b/bei-chen/; /m/morteza-ziyadi/; /z/zeqi-lin/; /q/qiang-fu/; /y/yan-gao/; /j/jian-guang-lou/; /w/weizhu-chen/", + "bibtex": "@inproceedings{pi-etal-2022-reasoning,\n title = \"Reasoning Like Program Executors\",\n author = \"Pi, Xinyu and\n Liu, Qian and\n Chen, Bei and\n Ziyadi, Morteza and\n Lin, Zeqi and\n Fu, Qiang and\n Gao, Yan and\n Lou, Jian-Guang and\n Chen, Weizhu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.48/\",\n doi = \"10.18653/v1/2022.emnlp-main.48\",\n pages = \"761--779\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.48.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.48/", + "pdf_size": 724095, + "gs_citation": 65, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2753246699664240397&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Illinois Urbana-Champaign, Urbana, USA; Sea AI Lab, Singapore; Microsoft Research Asia, Beijing, China; Microsoft Azure AI, Redmond, WA, USA; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Research Asia, Beijing, China; Microsoft Azure AI, Redmond, WA, USA", + "aff_domain": "illinois.edu;sea.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": 
"illinois.edu;sea.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2;3;2;2;2;2;3", + "aff_unique_norm": "University of Illinois Urbana-Champaign;Sea AI Lab;Microsoft Research Asia;Microsoft", + "aff_unique_dep": ";;Research;Azure AI", + "aff_unique_url": "https://illinois.edu;;https://www.microsoft.com/en-us/research/group/asia;https://azure.microsoft.com", + "aff_unique_abbr": "UIUC;;MSRA;MSFT", + "aff_campus_unique_index": "0;2;3;2;2;2;2;3", + "aff_campus_unique": "Urbana;;Beijing;Redmond", + "aff_country_unique_index": "0;1;2;0;2;2;2;2;0", + "aff_country_unique": "United States;Singapore;China" + }, + { + "id": "2022.findings-emnlp.129", + "title": "Reconciliation of Pre-trained Models and Prototypical Neural Networks in Few-shot Named Entity Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Incorporating large-scale pre-trained models with the prototypical neural networks is a de-facto paradigm in few-shot named entity recognition. Existing methods, unfortunately, are not aware of the fact that embeddings from pre-trained models contain a prominently large amount of information regarding word frequencies, biasing prototypical neural networks against learning word entities. This discrepancy constrains the two models\u2019 synergy. Thus, we propose a one-line-code normalization method to reconcile such a mismatch with empirical and theoretical grounds. Our experiments based on nine benchmark datasets show the superiority of our method over the counterpart models and are comparable to the state-of-the-art methods. 
In addition to the model enhancement, our work also provides an analytical viewpoint for addressing the general problems in few-shot name entity recognition or other tasks that rely on pre-trained models or prototypical neural networks.", + "author": "Youcheng Huang; Wenqiang Lei; Jie Fu; Jiancheng Lv", + "authorids": "/y/youcheng-huang/; /w/wenqiang-lei/; /j/jie-fu/; /j/jiancheng-lv/", + "bibtex": "@inproceedings{huang-etal-2022-reconciliation,\n title = \"Reconciliation of Pre-trained Models and Prototypical Neural Networks in Few-shot Named Entity Recognition\",\n author = \"Huang, Youcheng and\n Lei, Wenqiang and\n Fu, Jie and\n Lv, Jiancheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.129/\",\n doi = \"10.18653/v1/2022.findings-emnlp.129\",\n pages = \"1793--1807\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.129.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.129/", + "pdf_size": 629315, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5559272952217617635&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "College of Computer Science, Sichuan University\u2660\u2661; College of Computer Science, Sichuan University\u2660\u2020; Beijing Academy of Artificial Intelligence\u2663; College of Computer Science, Sichuan University\u2660", + "aff_domain": "gmail.com;gmail.com; ; ", + "email": "gmail.com;gmail.com; ; ", + "github": "https://github.com/HamLaertes/EMNLP_2022_Reconciliation", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Sichuan University;Beijing Academy of Artificial Intelligence", + "aff_unique_dep": "College of 
Computer Science;", + "aff_unique_url": "https://www.scu.edu.cn;https://www.baaic.cn", + "aff_unique_abbr": ";BAAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.730", + "title": "Recovering Gold from Black Sand: Multilingual Dense Passage Retrieval with Hard and False Negative Samples", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Negative samples have not been efficiently explored in multilingual dense passage retrieval. In this paper, we propose a novel multilingual dense passage retrieval framework, mHFN, to recover and utilize hard and false negative samples. mHFN consists of three key components: 1) a multilingual hard negative sample augmentation module that allows knowledge of indistinguishable passages to be shared across multiple languages and synthesizes new hard negative samples by interpolating representations of queries and existing hard negative samples, 2) a multilingual negative sample cache queue that stores negative samples from previous batches in each language to increase the number of multilingual negative samples used in training beyond the batch size limit, and 3) a lightweight adaptive false negative sample filter that uses generated pseudo labels to separate unlabeled false negative samples and converts them into positive passages in training. We evaluate mHFN on Mr. TyDi, a high-quality multilingual dense passage retrieval dataset covering eleven typologically diverse languages, and experimental results show that mHFN outperforms strong sparse, dense and hybrid baselines and achieves new state-of-the-art performance on all languages. 
Our source code is available at https://github.com/Magnetic2014/mHFN.", + "author": "Tianhao Shen; Mingtong Liu; Ming Zhou; Deyi Xiong", + "authorids": "/t/tianhao-shen/; /m/mingtong-liu/; /m/ming-zhou/; /d/deyi-xiong/", + "bibtex": "@inproceedings{shen-etal-2022-recovering,\n title = \"Recovering Gold from Black Sand: Multilingual Dense Passage Retrieval with Hard and False Negative Samples\",\n author = \"Shen, Tianhao and\n Liu, Mingtong and\n Zhou, Ming and\n Xiong, Deyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.730/\",\n doi = \"10.18653/v1/2022.emnlp-main.730\",\n pages = \"10659--10670\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.730.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.730/", + "pdf_size": 550863, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17766780575518234865&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "College of Intelligence and Computing, Tianjin University, Tianjin, China+Beijing Lanzhou Technology Co., Ltd., Beijing, China; Beijing Lanzhou Technology Co., Ltd., Beijing, China; Beijing Lanzhou Technology Co., Ltd., Beijing, China; College of Intelligence and Computing, Tianjin University, Tianjin, China", + "aff_domain": "tju.edu.cn;tju.edu.cn;langboat.com;langboat.com", + "email": "tju.edu.cn;tju.edu.cn;langboat.com;langboat.com", + "github": "https://github.com/Magnetic2014/mHFN", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;1;0", + "aff_unique_norm": "Tianjin University;Beijing Lanzhou Technology Co., Ltd.", + "aff_unique_dep": "College of Intelligence and Computing;", + "aff_unique_url": 
"http://www.tju.edu.cn;", + "aff_unique_abbr": "Tianjin University;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Tianjin;", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.470", + "title": "Recurrence Boosts Diversity! Revisiting Recurrent Latent Variable in Transformer-Based Variational AutoEncoder for Diverse Text Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Variational Auto-Encoder (VAE) has been widely adopted in text generation. Among many variants, recurrent VAE learns token-wise latent variables with each conditioned on the preceding ones, which captures sequential variability better in the era of RNN. However, it is unclear how to incorporate such recurrent dynamics into the recently dominant Transformer due to its parallelism. In this work, we propose TRACE, a Transformer-based recurrent VAE structure. TRACE imposes recurrence on segment-wise latent variables with arbitrarily separated text segments and constructs the posterior distribution with residual parameterization. Besides, we design an acceleration method by approximating idempotent matrices, which allows parallelism while maintaining the conditional dependence of latent variables. We demonstrate that TRACE could deduce a non-zero lower bound of the KL term and enhance the entanglement of each segment and preceding latent variables, providing a theoretical guarantee of generation diversity. Experiments on two unconditional and one conditional generation task show that TRACE achieves significantly improved diversity while maintaining satisfactory generation quality.", + "author": "Jinyi Hu; Xiaoyuan Yi; Wenhao Li; Maosong Sun; Xing Xie", + "authorids": "/j/jinyi-hu/; /x/xiaoyuan-yi/; /w/wenhao-li/; /m/maosong-sun/; /x/xing-xie/", + "bibtex": "@inproceedings{hu-etal-2022-recurrence,\n title = \"Recurrence Boosts Diversity! 
Revisiting Recurrent Latent Variable in Transformer-Based Variational {A}uto{E}ncoder for Diverse Text Generation\",\n author = \"Hu, Jinyi and\n Yi, Xiaoyuan and\n Li, Wenhao and\n Sun, Maosong and\n Xie, Xing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.470/\",\n doi = \"10.18653/v1/2022.findings-emnlp.470\",\n pages = \"6306--6320\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.470.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.470/", + "pdf_size": 397105, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7925244257129484000&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science and Technology, Tsinghua University, Beijing+Beijing National Research Center for Information Science and Technology+Institute for Artificial Intelligence, Tsinghua University, Beijing+Jiangsu Collaborative Innovation Center for Language Ability, Jiangsu Normal University, Xuzhou; Microsoft Research Asia; Department of Computer Science and Technology, Tsinghua University, Beijing+Beijing National Research Center for Information Science and Technology+Institute for Artificial Intelligence, Tsinghua University, Beijing+Jiangsu Collaborative Innovation Center for Language Ability, Jiangsu Normal University, Xuzhou; Department of Computer Science and Technology, Tsinghua University, Beijing+Beijing National Research Center for Information Science and Technology+Institute for Artificial Intelligence, Tsinghua University, Beijing+Jiangsu Collaborative Innovation Center for Language Ability, Jiangsu Normal University, Xuzhou; Microsoft Research Asia", + "aff_domain": 
"mails.tsinghua.edu.cn;microsoft.com; ;tsinghua.edu.cn; ", + "email": "mails.tsinghua.edu.cn;microsoft.com; ;tsinghua.edu.cn; ", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1+0+2;3;0+1+0+2;0+1+0+2;3", + "aff_unique_norm": "Tsinghua University;Beijing National Research Center for Information Science and Technology;Jiangsu Normal University;Microsoft Research", + "aff_unique_dep": "Department of Computer Science and Technology;;Jiangsu Collaborative Innovation Center for Language Ability;Research", + "aff_unique_url": "https://www.tsinghua.edu.cn;;http://www.jsnu.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "THU;;;MSR Asia", + "aff_campus_unique_index": "0+0+2;3;0+0+2;0+0+2;3", + "aff_campus_unique": "Beijing;;Xuzhou;Asia", + "aff_country_unique_index": "0+0+0+0;0;0+0+0+0;0+0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.320", + "title": "Recursive Neural Networks with Bottlenecks Diagnose (Non-)Compositionality", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A recent line of work in NLP focuses on the (dis)ability of models to generalise compositionally for artificial languages.However, when considering natural language tasks, the data involved is not strictly, or locally, compositional.Quantifying the compositionality of data is a challenging task, which has been investigated primarily for short utterances.We use recursive neural models (Tree-LSTMs) with bottlenecks that limit the transfer of information between nodes.We illustrate that comparing data\u2019s representations in models with and without the bottleneck can be used to produce a compositionality metric.The procedure is applied to the evaluation of arithmetic expressions using synthetic data, and sentiment classification using natural language data.We demonstrate that compression through a bottleneck impacts non-compositional examples disproportionatelyand then use the 
bottleneck compositionality metric (BCM) to distinguish compositional from non-compositional samples, yielding a compositionality ranking over a dataset.", + "author": "Verna Dankers; Ivan Titov", + "authorids": "/v/verna-dankers/; /i/ivan-titov/", + "bibtex": "@inproceedings{dankers-titov-2022-recursive,\n title = \"Recursive Neural Networks with Bottlenecks Diagnose (Non-)Compositionality\",\n author = \"Dankers, Verna and\n Titov, Ivan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.320/\",\n doi = \"10.18653/v1/2022.findings-emnlp.320\",\n pages = \"4361--4378\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.320.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.320/", + "pdf_size": 1133416, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1631351988293081457&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "ILCC, University of Edinburgh; ILCC, University of Edinburgh + ILLC, University of Amsterdam", + "aff_domain": "gmail.com;inf.ed.ac.uk", + "email": "gmail.com;inf.ed.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0+1", + "aff_unique_norm": "University of Edinburgh;University of Amsterdam", + "aff_unique_dep": "ILCC;ILLC", + "aff_unique_url": "https://www.ed.ac.uk;https://www.uva.nl", + "aff_unique_abbr": "Edinburgh;UvA", + "aff_campus_unique_index": "0;0+1", + "aff_campus_unique": "Edinburgh;Amsterdam", + "aff_country_unique_index": "0;0+1", + "aff_country_unique": "United Kingdom;Netherlands" + }, + { + "id": "2022.emnlp-main.225", + "title": "Red Teaming Language Models with Language Models", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "Language Models (LMs) often cannot be deployed because of their potential to harm users in hard-to-predict ways. Prior work identifies harmful behaviors before deployment by using human annotators to hand-write test cases. However, human annotation is expensive, limiting the number and diversity of test cases. In this work, we automatically find cases where a target LM behaves in a harmful way, by generating test cases (\u201cred teaming\u201d) using another LM. We evaluate the target LM\u2019s replies to generated test questions using a classifier trained to detect offensive content, uncovering tens of thousands of offensive replies in a 280B parameter LM chatbot. We explore several methods, from zero-shot generation to reinforcement learning, for generating test cases with varying levels of diversity and difficulty. Furthermore, we use prompt engineering to control LM-generated test cases to uncover a variety of other harms, automatically finding groups of people that the chatbot discusses in offensive ways, personal and hospital phone numbers generated as the chatbot\u2019s own contact info, leakage of private training data in generated text, and harms that occur over the course of a conversation. 
Overall, LM-based red teaming is one promising tool (among many needed) for finding and fixing diverse, undesirable LM behaviors before impacting users.", + "author": "Ethan Perez; Saffron Huang; Francis Song; Trevor Cai; Roman Ring; John Aslanides; Amelia Glaese; Nat McAleese; Geoffrey Irving", + "authorids": "/e/ethan-perez/; /s/saffron-huang/; /f/francis-song/; /t/trevor-cai/; /r/roman-ring/; /j/john-aslanides/; /a/amelia-glaese/; /n/nat-mcaleese/; /g/geoffrey-irving/", + "bibtex": "@inproceedings{perez-etal-2022-red,\n title = \"Red Teaming Language Models with Language Models\",\n author = \"Perez, Ethan and\n Huang, Saffron and\n Song, Francis and\n Cai, Trevor and\n Ring, Roman and\n Aslanides, John and\n Glaese, Amelia and\n McAleese, Nat and\n Irving, Geoffrey\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.225/\",\n doi = \"10.18653/v1/2022.emnlp-main.225\",\n pages = \"3419--3448\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.225.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.225/", + "pdf_size": 2885399, + "gs_citation": 685, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17731101759096454635&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "DeepMind+New York University; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind; DeepMind", + "aff_domain": "nyu.edu; ; ; ; ; ; ; ; ", + "email": "nyu.edu; ; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;0;0;0;0;0;0;0;0", + "aff_unique_norm": "DeepMind;New York University", + "aff_unique_dep": ";", + "aff_unique_url": "https://deepmind.com;https://www.nyu.edu", + 
"aff_unique_abbr": "DeepMind;NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;0;0;0;0;0;0;0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.findings-emnlp.142", + "title": "RedApt: An Adaptor for wav2vec 2 EncodingFaster and Smaller Speech Translation without Quality Compromise", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained speech Transformers in speech translation (ST) have facilitated state-of-the-art (SotA) results; yet, using such encoders is computationally expensive. To improve this, we present a novel Reducer Adaptor block, RedApt, that could be seamlessly integrated within any Transformer-based speech encoding architecture. Integrating the pretrained wav2vec 2 speech encoder with RedAptbrings 41% speedup, 33% memory reduction with 24% fewer FLOPs at inference. To our positive surprise, our ST model with RedApt outperforms the SotA architecture by an average of 0.68 BLEU score on 8 language pairs from Must-C.", + "author": "Jinming Zhao; Hao Yang; Gholamreza Haffari; Ehsan Shareghi", + "authorids": "/j/jinming-zhao/; /h/hao-yang/; /g/gholamreza-haffari/; /e/ehsan-shareghi/", + "bibtex": "@inproceedings{zhao-etal-2022-redapt,\n title = \"{R}ed{A}pt: An Adaptor for wav2vec 2 EncodingFaster and Smaller Speech Translation without Quality Compromise\",\n author = \"Zhao, Jinming and\n Yang, Hao and\n Haffari, Gholamreza and\n Shareghi, Ehsan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.142/\",\n doi = \"10.18653/v1/2022.findings-emnlp.142\",\n pages = \"1960--1967\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.142.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.142/", + "pdf_size": 1399982, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7719174070121815127&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University", + "aff_domain": "monash.edu;monash.edu;monash.edu;monash.edu", + "email": "monash.edu;monash.edu;monash.edu;monash.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Monash University", + "aff_unique_dep": "Department of Data Science & AI", + "aff_unique_url": "https://www.monash.edu", + "aff_unique_abbr": "Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "2022.emnlp-main.445", + "title": "Reduce Catastrophic Forgetting of Dense Retrieval Training with Teleportation Negatives", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we investigate the instability in the standard dense retrieval training, which iterates between model training and hard negative selection using the being-trained model. We show the catastrophic forgetting phenomena behind the training instability, where models learn and forget different negative groups during training iterations. We then propose ANCE-Tele, which accumulates momentum negatives from past iterations and approximates future iterations using lookahead negatives, as \u201cteleportations\u201d along the time axis to smooth the learning process. 
On web search and OpenQA, ANCE-Tele outperforms previous state-of-the-art systems of similar size, eliminates the dependency on sparse retrieval negatives, and is competitive among systems using significantly more (50x) parameters. Our analysis demonstrates that teleportation negatives reduce catastrophic forgetting and improve convergence speed for dense retrieval training. The source code of this paper is available at https://github.com/OpenMatch/ANCE-Tele.", + "author": "Si Sun; Chenyan Xiong; Yue Yu; Arnold Overwijk; Zhiyuan Liu; Jie Bao", + "authorids": "/s/si-sun/; /c/chenyan-xiong/; /y/yue-yu/; /a/arnold-overwijk/; /z/zhiyuan-liu/; /j/jie-bao/", + "bibtex": "@inproceedings{sun-etal-2022-reduce,\n title = \"Reduce Catastrophic Forgetting of Dense Retrieval Training with Teleportation Negatives\",\n author = \"Sun, Si and\n Xiong, Chenyan and\n Yu, Yue and\n Overwijk, Arnold and\n Liu, Zhiyuan and\n Bao, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.445/\",\n doi = \"10.18653/v1/2022.emnlp-main.445\",\n pages = \"6639--6654\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.445.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.445/", + "pdf_size": 816697, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9195358237718687218&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;", + "aff_domain": ";;;;;", + "email": ";;;;;", + "github": "https://github.com/OpenMatch/ANCE-Tele", + "project": "", + "author_num": 6 + }, + { + "id": "2022.emnlp-main.655", + "title": "Referee: Reference-Free Sentence Summarization with Sharper Controllability through Symbolic Knowledge 
Distillation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present Referee, a novel framework for sentence summarization that can be trained reference-free (i.e., requiring no gold summaries for supervision), while allowing direct control for compression ratio. Our work is the first to demonstrate that reference-free, controlled sentence summarization is feasible via the conceptual framework of Symbolic Knowledge Distillation (West et al., 2022), where latent knowledge in pre-trained language models is distilled via explicit examples sampled from the teacher models, further purified with three types of filters: length, fidelity, and Information Bottleneck. Moreover, we uniquely propose iterative distillation of knowledge, where student models from the previous iteration of distillation serve as teacher models in the next iteration. Starting off from a relatively modest set of GPT3-generated summaries, we demonstrate how iterative knowledge distillation can lead to considerably smaller, but better summarizers with sharper controllability. A useful by-product of this iterative distillation process is a high-quality dataset of sentence-summary pairs with varying degrees of compression ratios. 
Empirical results demonstrate that the final student models vastly outperform the much larger GPT3-Instruct model in terms of the controllability of compression ratios, without compromising the quality of resulting summarization.", + "author": "Melanie Sclar; Peter West; Sachin Kumar; Yulia Tsvetkov; Yejin Choi", + "authorids": "/m/melanie-sclar/; /p/peter-west/; /s/sachin-kumar/; /y/yulia-tsvetkov/; /y/yejin-choi/", + "bibtex": "@inproceedings{sclar-etal-2022-referee,\n title = \"Referee: Reference-Free Sentence Summarization with Sharper Controllability through Symbolic Knowledge Distillation\",\n author = \"Sclar, Melanie and\n West, Peter and\n Kumar, Sachin and\n Tsvetkov, Yulia and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.655/\",\n doi = \"10.18653/v1/2022.emnlp-main.655\",\n pages = \"9649--9668\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.655.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.655/", + "pdf_size": 684225, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15852710002511448971&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. Allen School of Computer Science & Engineering, University of Washington; Language Technologies Institute, Carnegie Mellon University; Paul G. Allen School of Computer Science & Engineering, University of Washington; Paul G. 
Allen School of Computer Science & Engineering, University of Washington + Allen Institute for Artificial Intelligence", + "aff_domain": "cs.washington.edu; ; ; ; ", + "email": "cs.washington.edu; ; ; ; ", + "github": "https://github.com/msclar/referee", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0+2", + "aff_unique_norm": "University of Washington;Carnegie Mellon University;Allen Institute for Artificial Intelligence", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;Language Technologies Institute;", + "aff_unique_url": "https://www.washington.edu;https://www.cmu.edu;https://allenai.org", + "aff_unique_abbr": "UW;CMU;AI2", + "aff_campus_unique_index": "0;0;1;0;0", + "aff_campus_unique": "Seattle;Pittsburgh;", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.455", + "title": "Refinement Matters: Textual Description Needs to be Refined for Zero-shot Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Zero-Shot Learning (ZSL) has shown great promise at the intersection of vision and language, and generative methods for ZSL are predominant owing to their efficiency. Moreover, textual description or attribute plays a critical role in transferring knowledge from the seen to unseen classes in ZSL. Such generative approaches for ZSL are very costly to train and require the class description of the unseen classes during training. In this work, we propose a non-generative gating-based attribute refinement network for ZSL, which achieves similar accuracies to generative methods of ZSL, at a much lower computational cost. The refined attributes are mapped into the visual domain through an attribute embedder, and the whole network is guided by the circle loss and the well-known softmax cross-entropy loss to obtain a robust class embedding. 
We refer to our approach as Circle loss guided gating-based Attribute-Refinement Network (CARNet). We perform extensive experiments on the five benchmark datasets over the various challenging scenarios viz., Generalized ZSL (GZSL), Continual GZSL (CGZSL), and conventional ZSL. We observe that the CARNet significantly outperforms recent non-generative ZSL methods and most generative ZSL methods in all three settings by a significant margin. Our extensive ablation study disentangles the performance of various components and justifies their importance. The source code is available at https://github.com/Sethup123/CARNet.", + "author": "Chandan Gautam; Sethupathy Parameswaran; Vinay Verma; Suresh Sundaram; Savitha Ramasamy", + "authorids": "/c/chandan-gautam/; /s/sethupathy-parameswaran/; /v/vinay-verma/; /s/suresh-sundaram/; /s/savitha-ramasamy/", + "bibtex": "@inproceedings{gautam-etal-2022-refinement,\n title = \"Refinement Matters: Textual Description Needs to be Refined for Zero-shot Learning\",\n author = \"Gautam, Chandan and\n Parameswaran, Sethupathy and\n Verma, Vinay and\n Sundaram, Suresh and\n Ramasamy, Savitha\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.455/\",\n doi = \"10.18653/v1/2022.findings-emnlp.455\",\n pages = \"6127--6140\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.455.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.455/", + "pdf_size": 1259663, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2757250987863676756&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Institute for Infocomm Research, A*STAR; Indian Institute of Science; Duke University; 
Indian Institute of Science; Institute for Infocomm Research, A*STAR", + "aff_domain": "i2r.a-star.edu.sg;iisc.ac.in;gmail.com;iisc.ac.in;i2r.a-star.edu.sg", + "email": "i2r.a-star.edu.sg;iisc.ac.in;gmail.com;iisc.ac.in;i2r.a-star.edu.sg", + "github": "https://github.com/Sethup123/CARNet", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;1;0", + "aff_unique_norm": "Institute for Infocomm Research;Indian Institute of Science;Duke University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.i2r.a-star.edu.sg;https://www.iisc.ac.in;https://www.duke.edu", + "aff_unique_abbr": "I2R;IISc;Duke", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;1;0", + "aff_country_unique": "Singapore;India;United States" + }, + { + "id": "2022.emnlp-main.714", + "title": "Reflect, Not Reflex: Inference-Based Common Ground Improves Dialogue Response Quality", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Human communication relies on common ground (CG), the mutual knowledge and beliefs shared by participants, to produce coherent and interesting conversations. In this paper, we demonstrate that current response generation (RG) models produce generic and dull responses in dialogues because they act reflexively, failing to explicitly model CG, both due to the lack of CG in training data and the standard RG training procedure. We introduce Reflect, a dataset that annotates dialogues with explicit CG (materialized as inferences approximating shared knowledge and beliefs) and solicits 9k diverse human-generated responses each following one common ground. Using Reflect, we showcase the limitations of current dialogue data and RG models: less than half of the responses in current data is rated as high quality (sensible, specific, and interesting) and models trained using this data have even lower quality, while most Reflect responses are judged high quality. 
Next, we analyze whether CG can help models produce better quality responses by using Reflect CG to guide RG models. Surprisingly, we find that simply prompting GPT3 to \u201cthink\u201d about CG generates 30% more quality responses, showing promising benefits to integrating CG into the RG process.", + "author": "Pei Zhou; Hyundong Cho; Pegah Jandaghi; Dong-Ho Lee; Bill Yuchen Lin; Jay Pujara; Xiang Ren", + "authorids": "/p/pei-zhou/; /h/hyundong-cho/; /p/pegah-jandaghi/; /d/dong-ho-lee/; /b/bill-yuchen-lin/; /j/jay-pujara/; /x/xiang-ren/", + "bibtex": "@inproceedings{zhou-etal-2022-reflect,\n title = \"Reflect, Not Reflex: Inference-Based Common Ground Improves Dialogue Response Quality\",\n author = \"Zhou, Pei and\n Cho, Hyundong and\n Jandaghi, Pegah and\n Lee, Dong-Ho and\n Lin, Bill Yuchen and\n Pujara, Jay and\n Ren, Xiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.714/\",\n doi = \"10.18653/v1/2022.emnlp-main.714\",\n pages = \"10450--10468\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.714.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.714/", + "pdf_size": 2731984, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5120746556458376847&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "https://inklab.usc.edu/Reflect/", + "author_num": 7 + }, + { + "id": "2022.emnlp-industry.36", + "title": "Reinforced Question Rewriting for Conversational Question Answering", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Conversational Question Answering (CQA) 
aims to answer questions contained within dialogues, which are not easily interpretable without context. Developing a model to rewrite conversational questions into self-contained ones is an emerging solution in industry settings as it allows using existing single-turn QA systems to avoid training a CQA model from scratch. Previous work trains rewriting models using human rewrites as supervision. However, such objectives are disconnected with QA models and therefore more human-like rewrites do not guarantee better QA performance. In this paper we propose using QA feedback to supervise the rewriting model with reinforcement learning. Experiments show that our approach can effectively improve QA performance over baselines for both extractive and retrieval QA. Furthermore, human evaluation shows that our method can generate more accurate and detailed rewrites when compared to human annotations.", + "author": "Zhiyu Chen; Jie Zhao; Anjie Fang; Besnik Fetahu; Oleg Rokhlenko; Shervin Malmasi", + "authorids": "/z/zhiyu-chen/; /j/jie-zhao/; /a/anjie-fang/; /b/besnik-fetahu/; /o/oleg-rokhlenko/; /s/shervin-malmasi/", + "bibtex": "@inproceedings{chen-etal-2022-reinforced,\n title = \"Reinforced Question Rewriting for Conversational Question Answering\",\n author = \"Chen, Zhiyu and\n Zhao, Jie and\n Fang, Anjie and\n Fetahu, Besnik and\n Rokhlenko, Oleg and\n Malmasi, Shervin\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.36/\",\n doi = \"10.18653/v1/2022.emnlp-industry.36\",\n pages = \"357--370\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.36.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.36/", + "pdf_size": 1222503, + "gs_citation": 
27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10411243094243267778&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Amazon.com, Inc., Seattle, WA, USA; Amazon.com, Inc., Seattle, WA, USA; Amazon.com, Inc., Seattle, WA, USA; Amazon.com, Inc., Seattle, WA, USA; Amazon.com, Inc., Seattle, WA, USA; Amazon.com, Inc., Seattle, WA, USA", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Amazon.com, Inc.", + "aff_unique_dep": "", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.317", + "title": "RelCLIP: Adapting Language-Image Pretraining for Visual Relationship Detection via Relational Contrastive Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Conventional visual relationship detection models only use the numeric ids of relation labels for training, but ignore the semantic correlation between the labels, which leads to severe training biases and harms the generalization ability of representations. In this paper, we introduce compact language information of relation labels for regularizing the representation learning of visual relations. Specifically, we propose a simple yet effective visual Relationship prediction framework that transfers natural language knowledge learned from Contrastive Language-Image Pre-training (CLIP) models to enhance the relationship prediction, termed RelCLIP. 
Benefiting from the powerful visual-semantic alignment ability of CLIP at image level, we introduce a novel Relational Contrastive Learning (RCL) approach which explores relation-level visual-semantic alignment via learning to match cross-modal relational embeddings. By collaboratively learning the semantic coherence and discrepancy from relation triplets, the model can generate more discriminative and robust representations. Experimental results on the Visual Genome dataset show that RelCLIP achieves significant improvements over strong baselines under full (provide accurate labels) and distant supervision (provide noise labels), demonstrating its powerful generalization ability in learning relationship representations. Code will be available at https://gitee.com/mindspore/models/tree/master/research/cv/RelCLIP.", + "author": "Yi Zhu; Zhaoqing Zhu; Bingqian Lin; Xiaodan Liang; Feng Zhao; Jianzhuang Liu", + "authorids": "/y/yi-zhu/; /z/zhaoqing-zhu/; /b/bingqian-lin/; /x/xiaodan-liang/; /f/feng-zhao/; /j/jianzhuang-liu/", + "bibtex": "@inproceedings{zhu-etal-2022-relclip,\n title = \"{R}el{CLIP}: Adapting Language-Image Pretraining for Visual Relationship Detection via Relational Contrastive Learning\",\n author = \"Zhu, Yi and\n Zhu, Zhaoqing and\n Lin, Bingqian and\n Liang, Xiaodan and\n Zhao, Feng and\n Liu, Jianzhuang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.317/\",\n doi = \"10.18653/v1/2022.emnlp-main.317\",\n pages = \"4800--4810\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.317.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.317/", + "pdf_size": 1210393, + "gs_citation": 8, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=9209444649158463720&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Huawei Noah\u2019s Ark Lab; University of Science and Technology of China; Sun Yat-sen University; Sun Yat-sen University; University of Science and Technology of China; Huawei Noah\u2019s Ark Lab", + "aff_domain": "outlook.com;mail.ustc.edu.cn;126.com;gmail.com;ustc.edu.cn;huawei.com", + "email": "outlook.com;mail.ustc.edu.cn;126.com;gmail.com;ustc.edu.cn;huawei.com", + "github": "", + "project": "https://gitee.com/mindspore/models/tree/master/research/cv/RelCLIP", + "author_num": 6, + "aff_unique_index": "0;1;2;2;1;0", + "aff_unique_norm": "Huawei;University of Science and Technology of China;Sun Yat-sen University", + "aff_unique_dep": "Noah\u2019s Ark Lab;;", + "aff_unique_url": "https://www.huawei.com;http://www.ustc.edu.cn;http://www.sysu.edu.cn/", + "aff_unique_abbr": "Huawei;USTC;SYSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.282", + "title": "RelU-Net: Syntax-aware Graph U-Net for Relational Triple Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relational triple extraction is a critical task for natural language processing. Existing methods mainly focused on capturing semantic information, but suffered from ignoring the syntactic structure of the sentence, which is proved in the relation classification task to contain rich relational information. This is due to the absence of entity locations, which is the prerequisite for pruning noisy edges from the dependency tree, when extracting relational triples. In this paper, we propose a unified framework to tackle this challenge and incorporate syntactic information for relational triple extraction. 
First, we propose to automatically contract the dependency tree into a core relational topology and eliminate redundant information with graph pooling operations. Then, we propose a symmetrical expanding path with graph unpooling operations to fuse the contracted core syntactic interactions with the original sentence context. We also propose a bipartite graph matching objective function to capture the reflections between the core topology and golden relational facts. Since our model shares similar contracting and expanding paths with encoder-decoder models like U-Net, we name our model as Relation U-Net (RelU-Net). We conduct experiments on several datasets and the results prove the effectiveness of our method.", + "author": "Yunqi Zhang; Yubo Chen; Yongfeng Huang", + "authorids": "/y/yunqi-zhang/; /y/yubo-chen/; /y/yongfeng-huang/", + "bibtex": "@inproceedings{zhang-etal-2022-relu,\n title = \"{R}el{U}-Net: Syntax-aware Graph {U}-Net for Relational Triple Extraction\",\n author = \"Zhang, Yunqi and\n Chen, Yubo and\n Huang, Yongfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.282/\",\n doi = \"10.18653/v1/2022.emnlp-main.282\",\n pages = \"4208--4217\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.282.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.282/", + "pdf_size": 1116045, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11336071485031684719&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 0, + "aff": "Department of Electronic Engineering & BNRist, Tsinghua University, Beijing, China; Department of Electronic Engineering & BNRist, Tsinghua University, Beijing, China + 
Zhongguancun Laboratory, Beijing, China; Department of Electronic Engineering & BNRist, Tsinghua University, Beijing, China + Zhongguancun Laboratory, Beijing, China", + "aff_domain": "mails.tsinghua.edu.cn;gmail.com;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;gmail.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;0+1", + "aff_unique_norm": "Tsinghua University;Zhongguancun Laboratory", + "aff_unique_dep": "Department of Electronic Engineering;", + "aff_unique_url": "https://www.tsinghua.edu.cn;", + "aff_unique_abbr": "THU;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.723", + "title": "Reorder and then Parse, Fast and Accurate Discontinuous Constituency Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Discontinuous constituency parsing is still kept developing for its efficiency and accuracy are far behind its continuous counterparts. Motivated by the observation that a discontinuous constituent tree can be simply transformed into a pseudo-continuous one by artificially reordering words in the sentence, we propose a novel reordering method, thereby construct fast and accurate discontinuous constituency parsing systems working in continuous way. Specifically, we model the relative position changes of words as a list of actions. By parsing and performing this actions, the corresponding pseudo-continuous sequence is derived. Discontinuous parse tree can be further inferred via integrating a high-performance pseudo-continuous constituency parser. 
Our systems are evaluated on three classical discontinuous constituency treebanks, achieving new state-of-the-art on two treebanks and showing a distinct advantage in speed.", + "author": "Kailai Sun; Zuchao Li; Hai Zhao", + "authorids": "/k/kailai-sun/; /z/zuchao-li/; /h/hai-zhao/", + "bibtex": "@inproceedings{sun-etal-2022-reorder,\n title = \"Reorder and then Parse, Fast and Accurate Discontinuous Constituency Parsing\",\n author = \"Sun, Kailai and\n Li, Zuchao and\n Zhao, Hai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.723/\",\n doi = \"10.18653/v1/2022.emnlp-main.723\",\n pages = \"10575--10588\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.723.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.723/", + "pdf_size": 527267, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10887317410471199685&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University; School of Computer Science, Wuhan University; Department of Computer Science and Engineering, Shanghai Jiao Tong University + MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;gmail.com;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;gmail.com;cs.sjtu.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;1;0+0", + "aff_unique_norm": "Shanghai Jiao Tong University;Wuhan University", + "aff_unique_dep": "Department of Computer Science and Engineering;School of Computer Science", 
+ "aff_unique_url": "https://www.sjtu.edu.cn;http://www.whu.edu.cn", + "aff_unique_abbr": "SJTU;WHU", + "aff_campus_unique_index": "1;2;1", + "aff_campus_unique": ";Shanghai;Wuhan", + "aff_country_unique_index": "0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.49", + "title": "Representation Learning for Resource-Constrained Keyphrase Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "State-of-the-art keyphrase generation methods generally depend on large annotated datasets, limiting their performance in domains with limited annotated data. To overcome this challenge, we design a data-oriented approach that first identifies salient information using retrieval-based corpus-level statistics, and then learns a task-specific intermediate representation based on a pre-trained language model using large-scale unlabeled documents. We introduce salient span recovery and salient span prediction as denoising training objectives that condense the intra-article and inter-article knowledge essential for keyphrase generation. Through experiments on multiple keyphrase generation benchmarks, we show the effectiveness of the proposed approach for facilitating low-resource keyphrase generation and zero-shot domain adaptation. 
Our method especially benefits the generation of absent keyphrases, approaching the performance of models trained with large training sets.", + "author": "Di Wu; Wasi Ahmad; Sunipa Dev; Kai-Wei Chang", + "authorids": "/d/di-wu/; /w/wasi-ahmad/; /s/sunipa-dev/; /k/kai-wei-chang/", + "bibtex": "@inproceedings{wu-etal-2022-representation,\n title = \"Representation Learning for Resource-Constrained Keyphrase Generation\",\n author = \"Wu, Di and\n Ahmad, Wasi and\n Dev, Sunipa and\n Chang, Kai-Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.49/\",\n doi = \"10.18653/v1/2022.findings-emnlp.49\",\n pages = \"700--716\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.49.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.49/", + "pdf_size": 440913, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16150495495070088423&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of California, Los Angeles; AWS AI Labs; University of California, Los Angeles; University of California, Los Angeles", + "aff_domain": "cs.ucla.edu;ucla.edu;cs.ucla.edu;cs.ucla.edu", + "email": "cs.ucla.edu;ucla.edu;cs.ucla.edu;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of California, Los Angeles;Amazon Web Services", + "aff_unique_dep": ";AWS AI Labs", + "aff_unique_url": "https://www.ucla.edu;https://aws.amazon.com", + "aff_unique_abbr": "UCLA;AWS", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": 
"2022.emnlp-main.192", + "title": "Reproducibility Issues for BERT-based Evaluation Metrics", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reproducibility is of utmost concern in machine learning and natural language processing (NLP). In the field of natural language generation (especially machine translation), the seminal paper of Post (2018) has pointed out problems of reproducibility of the dominant metric, BLEU, at the time of publication. Nowadays, BERT-based evaluation metrics considerably outperform BLEU. In this paper, we ask whether results and claims from four recent BERT-based metrics can be reproduced. We find that reproduction of claims and results often fails because of (i) heavy undocumented preprocessing involved in the metrics, (ii) missing code and (iii) reporting weaker results for the baseline metrics. (iv) In one case, the problem stems from correlating not to human scores but to a wrong column in the csv file, inflating scores by 5 points. Motivated by the impact of preprocessing, we then conduct a second study where we examine its effects more closely (for one of the metrics). We find that preprocessing can have large effects, especially for highly inflectional languages. In this case, the effect of preprocessing may be larger than the effect of the aggregation mechanism (e.g., greedy alignment vs. 
Word Mover Distance).", + "author": "Yanran Chen; Jonas Belouadi; Steffen Eger", + "authorids": "/y/yanran-chen/; /j/jonas-belouadi/; /s/steffen-eger/", + "bibtex": "@inproceedings{chen-etal-2022-reproducibility,\n title = \"Reproducibility Issues for {BERT}-based Evaluation Metrics\",\n author = \"Chen, Yanran and\n Belouadi, Jonas and\n Eger, Steffen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.192/\",\n doi = \"10.18653/v1/2022.emnlp-main.192\",\n pages = \"2965--2989\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.192.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.192/", + "pdf_size": 2693034, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12991506762761591426&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Computer Science Department, Technical University of Darmstadt, Germany; NLLG, Faculty of Technology, Bielefeld University, Germany; NLLG, Faculty of Technology, Bielefeld University, Germany", + "aff_domain": "stud.tu-darmstadt.de;uni-bielefeld.de;uni-bielefeld.de", + "email": "stud.tu-darmstadt.de;uni-bielefeld.de;uni-bielefeld.de", + "github": "", + "project": "https://aclrollingreview.org/responsibleNLPresearch/", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "Technical University of Darmstadt;Bielefeld University", + "aff_unique_dep": "Computer Science Department;Faculty of Technology", + "aff_unique_url": "https://www.tu-darmstadt.de;https://www.uni-bielefeld.de", + "aff_unique_abbr": "TUD;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Germany" + 
}, + { + "id": "2022.emnlp-main.150", + "title": "Reproducibility in Computational Linguistics: Is Source Code Enough?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The availability of source code has been put forward as one of the most critical factors for improving the reproducibility of scientific research. This work studies trends in source code availability at major computational linguistics conferences, namely, ACL, EMNLP, LREC, NAACL, and COLING. We observe positive trends, especially in conferences that actively promote reproducibility. We follow this by conducting a reproducibility study of eight papers published in EMNLP 2021, finding that source code releases leave much to be desired. Moving forward, we suggest all conferences require self-contained artifacts and provide a venue to evaluate such artifacts at the time of publication. Authors can include small-scale experiments and explicit scripts to generate each result to improve the reproducibility of their work.", + "author": "Mohammad Arvan; Lu\u00eds Pina; Natalie Parde", + "authorids": "/m/mohammad-arvan/; /l/luis-pina/; /n/natalie-parde/", + "bibtex": "@inproceedings{arvan-etal-2022-reproducibility-computational,\n title = \"Reproducibility in Computational Linguistics: Is Source Code Enough?\",\n author = \"Arvan, Mohammad and\n Pina, Lu{\\'i}s and\n Parde, Natalie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.150/\",\n doi = \"10.18653/v1/2022.emnlp-main.150\",\n pages = \"2350--2361\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.150.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.150/", + "pdf_size": 267296, + 
"gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15645834977654206526&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Department of Computer Science, University of Illinois Chicago; Department of Computer Science, University of Illinois Chicago; Department of Computer Science, University of Illinois Chicago", + "aff_domain": "uic.edu;uic.edu;uic.edu", + "email": "uic.edu;uic.edu;uic.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Illinois Chicago", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.uic.edu", + "aff_unique_abbr": "UIC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.113", + "title": "Rescue Implicit and Long-tail Cases: Nearest Neighbor Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relation extraction (RE) has achieved remarkable progress with the help of pre-trained language models. However, existing RE models are usually incapable of handling two situations: implicit expressions and long-tail relation types, caused by language complexity and data sparsity. In this paper, we introduce a simple enhancement of RE using k nearest neighbors (kNN-RE). kNN-RE allows the model to consult training relations at test time through a nearest-neighbor search and provides a simple yet effective means to tackle the two issues above. Additionally, we observe that kNN-RE serves as an effective way to leverage distant supervision (DS) data for RE. Experimental results show that the proposed kNN-RE achieves state-of-the-art performances on a variety of supervised RE datasets, i.e., ACE05, SciERC, and Wiki80, along with outperforming the best model to date on the i2b2 and Wiki80 datasets in the setting of allowing using DS. 
Our code and models are available at: https://github.com/YukinoWan/kNN-RE.", + "author": "Zhen Wan; Qianying Liu; Zhuoyuan Mao; Fei Cheng; Sadao Kurohashi; Jiwei Li", + "authorids": "/z/zhen-wan/; /q/qianying-liu/; /z/zhuoyuan-mao/; /f/fei-cheng/; /s/sadao-kurohashi/; /j/jiwei-li/", + "bibtex": "@inproceedings{wan-etal-2022-rescue,\n title = \"Rescue Implicit and Long-tail Cases: Nearest Neighbor Relation Extraction\",\n author = \"Wan, Zhen and\n Liu, Qianying and\n Mao, Zhuoyuan and\n Cheng, Fei and\n Kurohashi, Sadao and\n Li, Jiwei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.113/\",\n doi = \"10.18653/v1/2022.emnlp-main.113\",\n pages = \"1731--1738\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.113.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.113/", + "pdf_size": 577609, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1320899650855593125&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Kyoto University, Japan; Kyoto University, Japan; Kyoto University, Japan; Kyoto University, Japan; Kyoto University, Japan; Zhejiang University, China", + "aff_domain": "nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;zju.edu.cn", + "email": "nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;zju.edu.cn", + "github": "https://github.com/YukinoWan/kNN-RE", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;1", + "aff_unique_norm": "Kyoto University;Zhejiang University", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.kyoto-u.ac.jp;http://www.zju.edu.cn", + "aff_unique_abbr": "Kyoto U;ZJU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1", + "aff_country_unique": "Japan;China" + }, + { + "id": "2022.findings-emnlp.109", + "title": "Residual Learning of Neural Text Generation with n-gram Language Model", + "track": "main", + "status": "finding", + "award": false, + "abstract": "N-gram language models (LM) has been largely superseded by neural LMs as the latter exhibits better performance. However, we find that n-gram models can achieve satisfactory performance on a large proportion of testing cases, indicating they have already captured abundant knowledge of the language with relatively low computational cost. With this observation, we propose to learn a neural LM that fits the residual between an n-gram LM and the real-data distribution. The combination of n-gram LMs and neural LMs not only allows the neural part to focus on deeper understanding of the language, but also provides a flexible way to customize a LM by switching the underlying n-gram model without changing the neural model. Experimental results on three typical language tasks (i.e., language modeling, machine translation, and summarization) demonstrate that our approach attains additional performance gains over popular standalone neural models consistently. 
We also show that our approach allows for effective domain adaptation by simply switching to a domain-specific n-gram model, without any extra training.", + "author": "Huayang Li; Deng Cai; Jin Xu; Taro Watanabe", + "authorids": "/h/huayang-li/; /d/deng-cai/; /j/jin-xu/; /t/taro-watanabe/", + "bibtex": "@inproceedings{li-etal-2022-residual,\n title = \"Residual Learning of Neural Text Generation with n-gram Language Model\",\n author = \"Li, Huayang and\n Cai, Deng and\n Xu, Jin and\n Watanabe, Taro\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.109/\",\n doi = \"10.18653/v1/2022.findings-emnlp.109\",\n pages = \"1523--1533\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.109.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.109/", + "pdf_size": 296326, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3370457919208666300&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Nara Institute of Science and Technology; The Chinese University of Hong Kong; Institute for Interdisciplinary Information Sciences, Tsinghua University; Nara Institute of Science and Technology", + "aff_domain": "is.naist.jp;gmail.com;mails.tsinghua.edu.cn;is.naist.jp", + "email": "is.naist.jp;gmail.com;mails.tsinghua.edu.cn;is.naist.jp", + "github": "https://github.com/ghrua/NgramRes", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Nara Institute of Science and Technology;The Chinese University of Hong Kong;Tsinghua University", + "aff_unique_dep": ";;Institute for Interdisciplinary Information Sciences", + "aff_unique_url": 
"https://www.nist.go.jp;https://www.cuhk.edu.hk;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "NIST;CUHK;Tsinghua", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "Japan;China" + }, + { + "id": "2022.emnlp-main.561", + "title": "Rethinking Multi-Modal Alignment in Multi-Choice VideoQA from Feature and Sample Perspectives", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reasoning about causal and temporal event relations in videos is a new destination of Video Question Answering (VideoQA). The major stumbling block to achieve this purpose is the semantic gap between language and video since they are at different levels of abstraction. Existing efforts mainly focus on designing sophisticated architectures while utilizing frame- or object-level visual representations. In this paper, we reconsider the multi-modal alignment problem in VideoQA from feature and sample perspectives to achieve better performance. From the view of feature, we break down the video into trajectories and first leverage trajectory feature in VideoQA to enhance the alignment between two modalities. Moreover, we adopt a heterogeneous graph architecture and design a hierarchical framework to align both trajectory-level and frame-level visual feature with language feature. In addition, we found that VideoQA models are largely dependent on languagepriors and always neglect visual-language interactions. Thus, two effective yet portable training augmentation strategies are designed to strengthen the cross-modal correspondence ability of our model from the view of sample. 
Extensive results show that our method outperforms all the state-of the-art models on the challenging NExT-QA benchmark.", + "author": "Shaoning Xiao; Long Chen; Kaifeng Gao; Zhao Wang; Yi Yang; Zhimeng Zhang; Jun Xiao", + "authorids": "/s/shaoning-xiao/; /l/long-chen/; /k/kaifeng-gao/; /z/zhao-wang/; /y/yi-yang/; /z/zhimeng-zhang/; /j/jun-xiao/", + "bibtex": "@inproceedings{xiao-etal-2022-rethinking,\n title = \"Rethinking Multi-Modal Alignment in Multi-Choice {V}ideo{QA} from Feature and Sample Perspectives\",\n author = \"Xiao, Shaoning and\n Chen, Long and\n Gao, Kaifeng and\n Wang, Zhao and\n Yang, Yi and\n Zhang, Zhimeng and\n Xiao, Jun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.561/\",\n doi = \"10.18653/v1/2022.emnlp-main.561\",\n pages = \"8188--8198\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.561.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.561/", + "pdf_size": 2390105, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17537633899323425782&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "", + "project": "", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.210", + "title": "Rethinking Positional Encoding in Tree Transformer for Code Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformers are now widely used in code representation, and several recent works further develop tree Transformers to capture the syntactic structure in source code. 
Specifically, novel tree positional encodings have been proposed to incorporate inductive bias into Transformer.In this work, we propose a novel tree Transformer encoding node positions based on our new description method for tree structures.Technically, local and global soft bias shown in previous works is both introduced as positional encodings of our Transformer model.Our model finally outperforms strong baselines on code summarization and completion tasks across two languages, demonstrating our model\u2019s effectiveness.Besides, extensive experiments and ablation study shows that combining both local and global paradigms is still helpful in improving model performance. We release our code at https://github.com/AwdHanPeng/TreeTransformer.", + "author": "Han Peng; Ge Li; Yunfei Zhao; Zhi Jin", + "authorids": "/h/han-peng/; /g/ge-li/; /y/yunfei-zhao/; /z/zhi-jin/", + "bibtex": "@inproceedings{peng-etal-2022-rethinking,\n title = \"Rethinking Positional Encoding in Tree Transformer for Code Representation\",\n author = \"Peng, Han and\n Li, Ge and\n Zhao, Yunfei and\n Jin, Zhi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.210/\",\n doi = \"10.18653/v1/2022.emnlp-main.210\",\n pages = \"3204--3214\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.210.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.210/", + "pdf_size": 544358, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12668765013189969735&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Key Laboratory of High Confidence Software Technologies (Peking University), Ministry of Education + Institute of Software, EECS, 
Peking University, Beijing, China; Key Laboratory of High Confidence Software Technologies (Peking University), Ministry of Education + Institute of Software, EECS, Peking University, Beijing, China; Key Laboratory of High Confidence Software Technologies (Peking University), Ministry of Education + Institute of Software, EECS, Peking University, Beijing, China; Key Laboratory of High Confidence Software Technologies (Peking University), Ministry of Education + Institute of Software, EECS, Peking University, Beijing, China", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/AwdHanPeng/TreeTransformer", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;0+0;0+0", + "aff_unique_norm": "Peking University", + "aff_unique_dep": "Key Laboratory of High Confidence Software Technologies", + "aff_unique_url": "http://www.pku.edu.cn", + "aff_unique_abbr": "PKU", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.366", + "title": "Rethinking Style Transformer with Energy-based Interpretation: Adversarial Unsupervised Style Transfer using a Pretrained Model", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Style control, content preservation, and fluency determine the quality of text style transfer models. To train on a nonparallel corpus, several existing approaches aim to deceive the style discriminator with an adversarial loss. However, adversarial training significantly degrades fluency compared to the other two metrics. In this work, we explain this phenomenon using energy-based interpretation, and leverage a pretrained language model to improve fluency. 
Specifically, we propose a novel approach which applies the pretrained language model to the text style transfer framework by restructuring the discriminator and the model itself, allowing the generator and the discriminator to also take advantage of the power of the pretrained model. We evaluated our model on three public benchmarks GYAFC, Amazon, and Yelp and achieved state-of-the-art performance on the overall metrics.", + "author": "Hojun Cho; Dohee Kim; Seungwoo Ryu; ChaeHun Park; Hyungjong Noh; Jeong-in Hwang; Minseok Choi; Edward Choi; Jaegul Choo", + "authorids": "/h/hojun-cho/; /d/dohee-kim/; /s/seungwoo-ryu/; /c/chaehun-park/; /h/hyungjong-noh/; /j/jeong-in-hwang/; /m/minseok-choi/; /e/edward-choi/; /j/jaegul-choo/", + "bibtex": "@inproceedings{cho-etal-2022-rethinking,\n title = \"Rethinking Style Transformer with Energy-based Interpretation: Adversarial Unsupervised Style Transfer using a Pretrained Model\",\n author = \"Cho, Hojun and\n Kim, Dohee and\n Ryu, Seungwoo and\n Park, ChaeHun and\n Noh, Hyungjong and\n Hwang, Jeong-in and\n Choi, Minseok and\n Choi, Edward and\n Choo, Jaegul\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.366/\",\n doi = \"10.18653/v1/2022.emnlp-main.366\",\n pages = \"5452--5467\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.366.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.366/", + "pdf_size": 628685, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12963040912068111814&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "KAIST AI; KAIST AI; KAIST AI; KAIST AI; NCSOFT Corporation; NCSOFT Corporation; KAIST AI; KAIST AI; KAIST 
AI", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;ncsoft.com;ncsoft.com;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;ncsoft.com;ncsoft.com;kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;1;1;0;0;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;NCSOFT Corporation", + "aff_unique_dep": "KAIST AI;", + "aff_unique_url": "https://www.kaist.edu;https://www.ncsoft.com", + "aff_unique_abbr": "KAIST;NCSOFT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.729", + "title": "Rethinking Task-Specific Knowledge Distillation: Contextualized Corpus as Better Textbook", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge distillation has been proven effective when customizing small language models for specific tasks. Here, a corpus as \u2018textbook\u2019 plays an indispensable role, only through which the teacher can teach the student. Prevailing methods adopt a two-stage distillation paradigm: general distillation first with task-agnostic general corpus and task-specific distillation next with augmented task-specific corpus. We argue that such a paradigm may not be optimal. In general distillation, it\u2019s extravagant to let the diverse but desultory general knowledge overwhelms the limited model capacity of the student. While in task-specific distillation, the task corpus is usually limited and narrow, preventing the student from learning enough knowledge. To mitigate the issues in the two gapped corpora, we present a better textbook for the student to learn: contextualized corpus that contextualizes task corpus with large-scale general corpus through relevance-based text retrieval. 
Experimental results on GLUE benchmark demonstrate that contextualized corpus is the better textbook compared with jointly using general corpus and augmented task-specific corpus. Surprisingly, it enables task-specific distillation from scratch without general distillation while maintaining comparable performance, making it more flexible to customize the student model with desired model size under various computation constraints.", + "author": "Chang Liu; Chongyang Tao; Jianxin Liang; Tao Shen; Jiazhan Feng; Quzhe Huang; Dongyan Zhao", + "authorids": "/c/chang-liu/; /c/chongyang-tao/; /j/jianxin-liang/; /t/tao-shen/; /j/jiazhan-feng/; /q/quzhe-huang/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{liu-etal-2022-rethinking-task,\n title = \"Rethinking Task-Specific Knowledge Distillation: Contextualized Corpus as Better Textbook\",\n author = \"Liu, Chang and\n Tao, Chongyang and\n Liang, Jianxin and\n Shen, Tao and\n Feng, Jiazhan and\n Huang, Quzhe and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.729/\",\n doi = \"10.18653/v1/2022.emnlp-main.729\",\n pages = \"10652--10658\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.729.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.729/", + "pdf_size": 288317, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16276054050063582348&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, Peking University+School of Intelligence Science and Technology, Peking University; Wangxuan Institute of Computer Technology, Peking University+School 
of Intelligence Science and Technology, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University; Wangxuan Institute of Computer Technology, Peking University+School of Intelligence Science and Technology, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University; AAII, University of Technology Sydney; Wangxuan Institute of Computer Technology, Peking University+School of Intelligence Science and Technology, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University; Wangxuan Institute of Computer Technology, Peking University+School of Intelligence Science and Technology, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University; Wangxuan Institute of Computer Technology, Peking University+Center for Data Science, Peking University+Institute for Artificial Intelligence, Peking University+The MOE Key Laboratory of Computational Linguistics, Peking University", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;uts.edu.au;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;uts.edu.au;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0+0;0+0+0;0+0+0;1;0+0+0;0+0+0;0+0+0+0", + "aff_unique_norm": "Peking University;University of Technology Sydney", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;AAII", + "aff_unique_url": "http://www.pku.edu.cn;https://www.uts.edu.au", + "aff_unique_abbr": "PKU;UTS", + "aff_campus_unique_index": "1;;;;;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;1;0+0+0;0+0+0;0+0+0+0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.380", + "title": "Rethinking the Authorship Verification Experimental Setups", + "track": "main", + "status": "Main", + "award": false, + "abstract": "One of the main drivers of the recent advances in authorship verification is 
the PAN large-scale authorship dataset. Despite generating significant progress in the field, inconsistent performance differences between the closed and open test sets have been reported. To this end, we improve the experimental setup by proposing five new public splits over the PAN dataset, specifically designed to isolate and identify biases related to the text topic and to the author\u2019s writing style. We evaluate several BERT-like baselines on these splits, showing that such models are competitive with authorship verification state-of-the-art methods. Furthermore, using explainable AI, we find that these baselines are biased towards named entities. We show that models trained without the named entities obtain better results and generalize better when tested on DarkReddit, our new dataset for authorship verification.", + "author": "Florin Brad; Andrei Manolache; Elena Burceanu; Antonio Barbalau; Radu Tudor Ionescu; Marius Popescu", + "authorids": "/f/florin-brad/; /a/andrei-manolache/; /e/elena-burceanu/; /a/antonio-barbalau/; /r/radu-tudor-ionescu/; /m/marius-popescu/", + "bibtex": "@inproceedings{brad-etal-2022-rethinking,\n title = \"Rethinking the Authorship Verification Experimental Setups\",\n author = \"Brad, Florin and\n Manolache, Andrei and\n Burceanu, Elena and\n Barbalau, Antonio and\n Ionescu, Radu Tudor and\n Popescu, Marius\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.380/\",\n doi = \"10.18653/v1/2022.emnlp-main.380\",\n pages = \"5634--5643\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.380.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.380/", + "pdf_size": 626441, + "gs_citation": 5, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=7028328058892612337&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Bitdefender; Bitdefender+University of Stuttgart; Bitdefender; University of Bucharest; University of Bucharest; University of Bucharest", + "aff_domain": "bitdefender.com;bitdefender.com;bitdefender.com;fmi.unibuc.ro;gmail.com;gmail.com", + "email": "bitdefender.com;bitdefender.com;bitdefender.com;fmi.unibuc.ro;gmail.com;gmail.com", + "github": "https://github.com/bit-ml/Dupin/tree/main", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;2;2;2", + "aff_unique_norm": "Bitdefender;University of Stuttgart;University of Bucharest", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.bitdefender.com;https://www.uni-stuttgart.de;https://www.unibuc.ro", + "aff_unique_abbr": "Bitdefender;USTuttgart;Unibuc", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+1;0;0;0;0", + "aff_country_unique": "Romania;Germany" + }, + { + "id": "2022.emnlp-main.759", + "title": "Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large language models (LMs) are able to in-context learn\u2014perform a new task via inference alone by conditioning on a few input-label pairs (demonstrations) and making predictions for new inputs. However, there has been little understanding of how the model learns and which aspects of the demonstrations contribute to end task performance. In this paper, we show that ground truth demonstrations are in fact not required\u2014randomly replacing labels in the demonstrations barely hurts performance on a range of classification and multi-choce tasks, consistently over 12 different models including GPT-3. 
Instead, we find that other aspects of the demonstrations are the key drivers of endtask performance, including the fact that they provide a few examples of (1) the label space, (2) the distribution of the input text, and (3) the overall format of the sequence. Together, our analysis provides a new way of understanding how and why in-context learning works, while opening up new questions about how much can be learned from large language models through inference alone.", + "author": "Sewon Min; Xinxi Lyu; Ari Holtzman; Mikel Artetxe; Mike Lewis; Hannaneh Hajishirzi; Luke Zettlemoyer", + "authorids": "/s/sewon-min/; /x/xinxi-lyu/; /a/ari-holtzman/; /m/mikel-artetxe/; /m/mike-lewis/; /h/hannaneh-hajishirzi/; /l/luke-zettlemoyer/", + "bibtex": "@inproceedings{min-etal-2022-rethinking,\n title = \"Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?\",\n author = \"Min, Sewon and\n Lyu, Xinxi and\n Holtzman, Ari and\n Artetxe, Mikel and\n Lewis, Mike and\n Hajishirzi, Hannaneh and\n Zettlemoyer, Luke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.759/\",\n doi = \"10.18653/v1/2022.emnlp-main.759\",\n pages = \"11048--11064\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.759.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.759/", + "pdf_size": 752125, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=885352432019761819&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "University of Washington + Meta AI; University of Washington; University of Washington; Meta AI; Meta AI; University of Washington + Allen Institute for AI; University of Washington + Meta 
AI", + "aff_domain": "cs.washington.edu;cs.washington.edu;cs.washington.edu;meta.com;meta.com;cs.washington.edu;cs.washington.edu", + "email": "cs.washington.edu;cs.washington.edu;cs.washington.edu;meta.com;meta.com;cs.washington.edu;cs.washington.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;0;1;1;0+2;0+1", + "aff_unique_norm": "University of Washington;Meta Platforms, Inc.;Allen Institute for AI", + "aff_unique_dep": ";Meta AI;", + "aff_unique_url": "https://www.washington.edu;https://meta.com;https://allenai.org", + "aff_unique_abbr": "UW;Meta;AI2", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.41", + "title": "Rethinking the Video Sampling and Reasoning Strategies for Temporal Sentence Grounding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Temporal sentence grounding (TSG) aims to identify the temporal boundary of a specific segment from an untrimmed video by a sentence query. All existing works first utilize a sparse sampling strategy to extract a fixed number of video frames and then interact them with query for reasoning.However, we argue that these methods have overlooked two indispensable issues:1) Boundary-bias: The annotated target segment generally refers to two specific frames as corresponding start and end timestamps. The video downsampling process may lose these two frames and take the adjacent irrelevant frames as new boundaries.2) Reasoning-bias: Such incorrect new boundary frames also lead to the reasoning bias during frame-query interaction, reducing the generalization ability of model.To alleviate above limitations, in this paper, we propose a novel Siamese Sampling and Reasoning Network (SSRN) for TSG, which introduces a siamese sampling mechanism to generate additional contextual frames to enrich and refine the new boundaries. 
Specifically, a reasoning strategy is developed to learn the inter-relationship among these frames and generate soft labels on boundaries for more accurate frame-query reasoning. Such mechanism is also able to supplement the absent consecutive visual semantics to the sampled sparse frames for fine-grained activity understanding.Extensive experiments demonstrate the effectiveness of SSRN on three challenging datasets.", + "author": "Jiahao Zhu; Daizong Liu; Pan Zhou; Xing Di; Yu Cheng; Song Yang; Wenzheng Xu; Zichuan Xu; Yao Wan; Lichao Sun; Zeyu Xiong", + "authorids": "/j/jiahao-zhu/; /d/daizong-liu/; /p/pan-zhou/; /x/xing-di/; /y/yu-cheng/; /s/song-yang/; /w/wenzheng-xu/; /z/zichuan-xu/; /y/yao-wan/; /l/lichao-sun/; /z/zeyu-xiong/", + "bibtex": "@inproceedings{zhu-etal-2022-rethinking,\n title = \"Rethinking the Video Sampling and Reasoning Strategies for Temporal Sentence Grounding\",\n author = \"Zhu, Jiahao and\n Liu, Daizong and\n Zhou, Pan and\n Di, Xing and\n Cheng, Yu and\n Yang, Song and\n Xu, Wenzheng and\n Xu, Zichuan and\n Wan, Yao and\n Sun, Lichao and\n Xiong, Zeyu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.41/\",\n doi = \"10.18653/v1/2022.findings-emnlp.41\",\n pages = \"590--600\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.41.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.41/", + "pdf_size": 1062562, + "gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17105104148241434021&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": ";;;;;;;;;;", + "aff_domain": ";;;;;;;;;;", + "email": ";;;;;;;;;;", + "github": "", + "project": "", + "author_num": 11 + }, + { + 
"id": "2022.emnlp-main.294", + "title": "Retrieval Augmentation for Commonsense Reasoning: A Unified Approach", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A common thread of retrieval-augmented methods in the existing literature focuses on retrieving encyclopedic knowledge, such as Wikipedia, which facilitates well-defined entity and relation spaces that can be modeled. However, applying such methods to commonsense reasoning tasks faces two unique challenges, i.e., the lack of a general large-scale corpus for retrieval and a corresponding effective commonsense retriever. In this paper, we systematically investigate how to leverage commonsense knowledge retrieval to improve commonsense reasoning tasks. We proposed a unified framework of retrieval-augmented commonsense reasoning (called RACo), including a newly constructed commonsense corpus with over 20 million documents and novel strategies for training a commonsense retriever. We conducted experiments on four different commonsense reasoning tasks. 
Extensive evaluation results showed that our proposed RACo can significantly outperform other knowledge-enhanced method counterparts, achieving new SoTA performance on the CommonGen and CREAK leaderboards.", + "author": "Wenhao Yu; Chenguang Zhu; Zhihan Zhang; Shuohang Wang; Zhuosheng Zhang; Yuwei Fang; Meng Jiang", + "authorids": "/w/wenhao-yu/; /c/chenguang-zhu/; /z/zhihan-zhang/; /s/shuohang-wang/; /z/zhuosheng-zhang/; /y/yuwei-fang/; /m/meng-jiang/", + "bibtex": "@inproceedings{yu-etal-2022-retrieval,\n title = \"Retrieval Augmentation for Commonsense Reasoning: A Unified Approach\",\n author = \"Yu, Wenhao and\n Zhu, Chenguang and\n Zhang, Zhihan and\n Wang, Shuohang and\n Zhang, Zhuosheng and\n Fang, Yuwei and\n Jiang, Meng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.294/\",\n doi = \"10.18653/v1/2022.emnlp-main.294\",\n pages = \"4364--4377\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.294.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.294/", + "pdf_size": 353761, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11353454637089110416&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/wyu97/RACo", + "project": "https://inklab.usc.edu/CommonGen/leaderboard.html; https://www.cs.utexas.edu/~yasumasa/creak/leaderboard.html", + "author_num": 7 + }, + { + "id": "2022.emnlp-main.772", + "title": "Retrieval Augmented Visual Question Answering with Outside Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Outside-Knowledge Visual Question Answering 
(OK-VQA) is a challenging VQA task that requires retrieval of external knowledge to answer questions about images. Recent OK-VQA systems use Dense Passage Retrieval (DPR) to retrieve documents from external knowledge bases, such as Wikipedia, but with DPR trained separately from answer generation, introducing a potential limit on the overall system performance.Instead, we propose a joint training scheme which includes differentiable DPR integrated with answer generation so that the system can be trained in an end-to-end fashion. Our experiments show that our scheme outperforms recent OK-VQA systems with strong DPR for retrieval. We also introduce new diagnostic metrics to analyze how retrieval and generation interact. The strong retrieval ability of our model significantly reduces the number of retrieved documents needed in training, yielding significant benefits in answer quality and computation required for training.", + "author": "Weizhe Lin; Bill Byrne", + "authorids": "/w/weizhe-lin/; /b/bill-byrne/", + "bibtex": "@inproceedings{lin-byrne-2022-retrieval,\n title = \"Retrieval Augmented Visual Question Answering with Outside Knowledge\",\n author = \"Lin, Weizhe and\n Byrne, Bill\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.772/\",\n doi = \"10.18653/v1/2022.emnlp-main.772\",\n pages = \"11238--11254\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.772.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.772/", + "pdf_size": 1420962, + "gs_citation": 77, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6487791700193964528&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of 
Engineering, University of Cambridge, United Kingdom; Department of Engineering, University of Cambridge, United Kingdom", + "aff_domain": "cam.ac.uk;eng.cam.ac.uk", + "email": "cam.ac.uk;eng.cam.ac.uk", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Department of Engineering", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.149", + "title": "Retrieval as Attention: End-to-end Learning of Retrieval and Reading within a Single Transformer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Systems for knowledge-intensive tasks such as open-domain question answering (QA) usually consist of two stages: efficient retrieval of relevant documents from a large corpus and detailed reading of the selected documents. This is usually done through two separate models, a retriever that encodes the query and finds nearest neighbors, and a reader based on Transformers. These two components are usually modeled separately, which necessitates a cumbersome implementation and is awkward to optimize in an end-to-end fashion. In this paper, we revisit this design and eschew the separate architecture and training in favor of a single Transformer that performs retrieval as attention (RAA), and end-to-end training solely based on supervision from the end QA task. We demonstrate for the first time that an end-to-end trained single Transformer can achieve both competitive retrieval and QA performance on in-domain datasets, matching or even slightly outperforming state-of-the-art dense retrievers and readers. 
Moreover, end-to-end adaptation of our model significantly boosts its performance on out-of-domain datasets in both supervised and unsupervised settings, making our model a simple and adaptable end-to-end solution for knowledge-intensive tasks.", + "author": "Zhengbao Jiang; Luyu Gao; Zhiruo Wang; Jun Araki; Haibo Ding; Jamie Callan; Graham Neubig", + "authorids": "/z/zhengbao-jiang/; /l/luyu-gao/; /z/zhiruo-wang/; /j/jun-araki/; /h/haibo-ding/; /j/jamie-callan/; /g/graham-neubig/", + "bibtex": "@inproceedings{jiang-etal-2022-retrieval,\n title = \"Retrieval as Attention: End-to-end Learning of Retrieval and Reading within a Single Transformer\",\n author = \"Jiang, Zhengbao and\n Gao, Luyu and\n Wang, Zhiruo and\n Araki, Jun and\n Ding, Haibo and\n Callan, Jamie and\n Neubig, Graham\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.149/\",\n doi = \"10.18653/v1/2022.emnlp-main.149\",\n pages = \"2336--2349\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.149.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.149/", + "pdf_size": 521846, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10665430271626906744&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Language Technologies Institute, Carnegie Mellon University\u2661; Language Technologies Institute, Carnegie Mellon University\u2661; Bosch Research North America\u2662; Bosch Research North America\u2662\u2020; Language Technologies Institute, Carnegie Mellon University\u2661; Language Technologies Institute, Carnegie Mellon University\u2661; Language Technologies Institute, Carnegie Mellon University\u2661", + "aff_domain": 
"cs.cmu.edu;cs.cmu.edu;us.bosch.com;us.bosch.com;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "email": "cs.cmu.edu;cs.cmu.edu;us.bosch.com;us.bosch.com;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu", + "github": "https://github.com/jzbjyb/ReAtt", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;1;0;0;0", + "aff_unique_norm": "Carnegie Mellon University;Bosch Research North America", + "aff_unique_dep": "Language Technologies Institute;", + "aff_unique_url": "https://www.cmu.edu;https://research.bosch.com/northamerica", + "aff_unique_abbr": "CMU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.307", + "title": "Retrieval-Augmented Generative Question Answering for Event Argument Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Event argument extraction has long been studied as a sequential prediction problem with extractive-based methods, tackling each argument in isolation. Although recent work proposes generation-based methods to capture cross-argument dependency, they require generating and post-processing a complicated target sequence (template). Motivated by these observations and recent pretrained language models\u2019 capabilities of learning from demonstrations. We propose a retrieval-augmented generative QA model (R-GQA) for event argument extraction. It retrieves the most similar QA pair and augments it as prompt to the current example\u2019s context, then decodes the arguments as answers. Our approach outperforms substantially prior methods across various settings (i.e. fully supervised, domain transfer, and fewshot learning). 
Finally, we propose a clustering-based sampling strategy (JointEnc) and conduct a thorough analysis of how different strategies influence the few-shot learning performances.", + "author": "Xinya Du; Heng Ji", + "authorids": "/x/xinya-du/; /h/heng-ji/", + "bibtex": "@inproceedings{du-ji-2022-retrieval,\n title = \"Retrieval-Augmented Generative Question Answering for Event Argument Extraction\",\n author = \"Du, Xinya and\n Ji, Heng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.307/\",\n doi = \"10.18653/v1/2022.emnlp-main.307\",\n pages = \"4649--4666\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.307.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.307/", + "pdf_size": 1447896, + "gs_citation": 47, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4096255576245629101&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, The University of Texas at Dallas; Department of Computer Science, University of Illinois Urbana-Champaign", + "aff_domain": "utdallas.edu;illinois.edu", + "email": "utdallas.edu;illinois.edu", + "github": "https://github.com/xinyadu/RGQA", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "The University of Texas at Dallas;University of Illinois Urbana-Champaign", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science", + "aff_unique_url": "https://www.utdallas.edu;https://illinois.edu", + "aff_unique_abbr": "UT Dallas;UIUC", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Dallas;Urbana-Champaign", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United 
States" + }, + { + "id": "2022.emnlp-main.35", + "title": "RetroMAE: Pre-Training Retrieval-oriented Language Models Via Masked Auto-Encoder", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite pre-training\u2019s progress in many important NLP tasks, it remains to explore effective pre-training strategies for dense retrieval. In this paper, we propose RetroMAE, a new retrieval oriented pre-training paradigm based on Masked Auto-Encoder (MAE). RetroMAE is highlighted by three critical designs. 1) A novel MAE workflow, where the input sentence is polluted for encoder and decoder with different masks. The sentence embedding is generated from the encoder\u2019s masked input; then, the original sentence is recovered based on the sentence embedding and the decoder\u2019s masked input via masked language modeling. 2) Asymmetric model structure, with a full-scale BERT like transformer as encoder, and a one-layer transformer as decoder. 3) Asymmetric masking ratios, with a moderate ratio for encoder: 15 30%, and an aggressive ratio for decoder: 50 70%. Our framework is simple to realize and empirically competitive: the pre-trained models dramatically improve the SOTA performances on a wide range of dense retrieval benchmarks, like BEIR and MS MARCO. 
The source code and pre-trained models are made publicly available at https://github.com/staoxiao/RetroMAE so as to inspire more interesting research.", + "author": "Shitao Xiao; Zheng Liu; Yingxia Shao; Zhao Cao", + "authorids": "/s/shitao-xiao/; /z/zheng-liu/; /y/yingxia-shao/; /z/zhao-cao/", + "bibtex": "@inproceedings{xiao-etal-2022-retromae,\n title = \"{R}etro{MAE}: Pre-Training Retrieval-oriented Language Models Via Masked Auto-Encoder\",\n author = \"Xiao, Shitao and\n Liu, Zheng and\n Shao, Yingxia and\n Cao, Zhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.35/\",\n doi = \"10.18653/v1/2022.emnlp-main.35\",\n pages = \"538--548\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.35.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.35/", + "pdf_size": 353066, + "gs_citation": 170, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9894715373609371467&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Beijing University of Posts and Telecommunications; Huawei Technologies Ltd. Co.; Beijing University of Posts and Telecommunications; Huawei Technologies Ltd. 
Co.", + "aff_domain": "bupt.edu.cn;huawei.com;bupt.edu.cn;huawei.com", + "email": "bupt.edu.cn;huawei.com;bupt.edu.cn;huawei.com", + "github": "https://github.com/staoxiao/RetroMAE", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Beijing University of Posts and Telecommunications;Huawei Technologies", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.huawei.com", + "aff_unique_abbr": "BUPT;Huawei", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.433", + "title": "Retrofitting Multilingual Sentence Embeddings with Abstract Meaning Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We introduce a new method to improve existing multilingual sentence embeddings with Abstract Meaning Representation (AMR). Compared with the original textual input, AMR is a structured semantic representation that presents the core concepts and relations in a sentence explicitly and unambiguously. It also helps reduce the surface variations across different expressions and languages. Unlike most prior work that only evaluates the ability to measure semantic similarity, we present a thorough evaluation of existing multilingual sentence embeddings and our improved versions, which include a collection of five transfer tasks in different downstream applications. 
Experiment results show that retrofitting multilingual sentence embeddings with AMR leads to better state-of-the-art performance on both semantic textual similarity and transfer tasks.", + "author": "Deng Cai; Xin Li; Jackie Chun-Sing Ho; Lidong Bing; Wai Lam", + "authorids": "/d/deng-cai/; /x/xin-li/; /j/jackie-chun-sing-ho/; /l/lidong-bing/; /w/wai-lam/", + "bibtex": "@inproceedings{cai-etal-2022-retrofitting,\n title = \"Retrofitting Multilingual Sentence Embeddings with {A}bstract {M}eaning {R}epresentation\",\n author = \"Cai, Deng and\n Li, Xin and\n Ho, Jackie Chun-Sing and\n Bing, Lidong and\n Lam, Wai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.433/\",\n doi = \"10.18653/v1/2022.emnlp-main.433\",\n pages = \"6456--6472\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.433.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.433/", + "pdf_size": 349636, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10127127528291909173&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": "The Chinese University of Hong Kong+DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; The Chinese University of Hong Kong; DAMO Academy, Alibaba Group; The Chinese University of Hong Kong", + "aff_domain": "gmail.com;alibaba-inc.com;alibaba-inc.com;link.cuhk.edu.hk;se.cuhk.edu.hk", + "email": "gmail.com;alibaba-inc.com;alibaba-inc.com;link.cuhk.edu.hk;se.cuhk.edu.hk", + "github": "https://github.com/jcyk/MSE-AMR", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;1;0;1;0", + "aff_unique_norm": "The Chinese University of Hong Kong;Alibaba Group", + "aff_unique_dep": ";DAMO 
Academy", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.alibaba-group.com", + "aff_unique_abbr": "CUHK;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.580", + "title": "Revisiting DocRED - Addressing the False Negative Problem in Relation Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The DocRED dataset is one of the most popular and widely used benchmarks for document-level relation extraction (RE). It adopts a recommend-revise annotation scheme so as to have a large-scale annotated dataset. However, we find that the annotation of DocRED is incomplete, i.e., false negative samples are prevalent. We analyze the causes and effects of the overwhelming false negative problem in the DocRED dataset. To address the shortcoming, we re-annotate 4,053 documents in the DocRED dataset by adding the missed relation triples back to the original DocRED. We name our revised DocRED dataset Re-DocRED. We conduct extensive experiments with state-of-the-art neural models on both datasets, and the experimental results show that the models trained and evaluated on our Re-DocRED achieve performance improvements of around 13 F1 points. 
Moreover, we conduct a comprehensive analysis to identify the potential areas for further improvement.", + "author": "Qingyu Tan; Lu Xu; Lidong Bing; Hwee Tou Ng; Sharifah Mahani Aljunied", + "authorids": "/q/qingyu-tan/; /l/lu-xu/; /l/lidong-bing/; /h/hwee-tou-ng/; /s/sharifah-mahani-aljunied/", + "bibtex": "@inproceedings{tan-etal-2022-revisiting,\n title = \"Revisiting {D}oc{RED} - Addressing the False Negative Problem in Relation Extraction\",\n author = \"Tan, Qingyu and\n Xu, Lu and\n Bing, Lidong and\n Ng, Hwee Tou and\n Aljunied, Sharifah Mahani\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.580/\",\n doi = \"10.18653/v1/2022.emnlp-main.580\",\n pages = \"8472--8487\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.580.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.580/", + "pdf_size": 280529, + "gs_citation": 91, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15242634560237305502&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "https://github.com/tonytan48/Re-DocRED", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.463", + "title": "Revisiting Grammatical Error Correction Evaluation and Beyond", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pretraining-based (PT-based) automatic evaluation metrics (e.g., BERTScore and BARTScore) have been widely used in several sentence generation tasks (e.g., machine translation and text summarization) due to their better correlation with human judgments over traditional overlap-based methods. 
Although PT-based methods have become the de facto standard for training grammatical error correction (GEC) systems, GEC evaluation still does not benefit from pretrained knowledge. This paper takes the first step towards understanding and improving GEC evaluation with pretraining. We first find that arbitrarily applying PT-based metrics to GEC evaluation brings unsatisfactory correlation results because of the excessive attention to inessential systems outputs (e.g., unchanged parts). To alleviate the limitation, we propose a novel GEC evaluation metric to achieve the best of both worlds, namely PT-M2 which only uses PT-based metrics to score those corrected parts. Experimental results on the CoNLL14 evaluation task show that PT-M2 significantly outperforms existing methods, achieving a new state-of-the-art result of 0.949 Pearson correlation. Further analysis reveals that PT-M2 is robust to evaluate competitive GEC systems. Source code and scripts are freely available at https://github.com/pygongnlp/PT-M2.", + "author": "Peiyuan Gong; Xuebo Liu; Heyan Huang; Min Zhang", + "authorids": "/p/peiyuan-gong/; /x/xuebo-liu/; /h/he-yan-huang/; /m/min-zhang/", + "bibtex": "@inproceedings{gong-etal-2022-revisiting,\n title = \"Revisiting Grammatical Error Correction Evaluation and Beyond\",\n author = \"Gong, Peiyuan and\n Liu, Xuebo and\n Huang, Heyan and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.463/\",\n doi = \"10.18653/v1/2022.emnlp-main.463\",\n pages = \"6891--6902\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.463.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.463/", + "pdf_size": 1238389, + 
"gs_citation": 22, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10220715940588746905&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China; School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China; Institute of Computing and Intelligence, Harbin Institute of Technology, Shenzhen, China", + "aff_domain": "bit.edu.cn;hit.edu.cn;bit.edu.cn;hit.edu.cn", + "email": "bit.edu.cn;hit.edu.cn;bit.edu.cn;hit.edu.cn", + "github": "https://github.com/pygongnlp/PT-M2", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;1", + "aff_unique_norm": "Beijing Institute of Technology;Harbin Institute of Technology", + "aff_unique_dep": "School of Computer Science and Technology;Institute of Computing and Intelligence", + "aff_unique_url": "http://www.bit.edu.cn;http://www.hhit.edu.cn", + "aff_unique_abbr": "BIT;HIT", + "aff_campus_unique_index": "0;1;0;1", + "aff_campus_unique": "Beijing;Shenzhen", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.168", + "title": "Revisiting Parameter-Efficient Tuning: Are We Really There Yet?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Parameter-Efficient Tuning (PETuning) methods have been deemed by many as the new paradigm for using pretrained language models (PLMs). By tuning just a fraction amount of parameters comparing to full model finetuning, PETuning methods claim to have achieved performance on par with or even better than finetuning. In this work, we take a step back and re-examine these PETuning methods by conducting the first comprehensive investigation into the training and evaluation of them. 
We found the problematic validation and testing practice in current studies, when accompanied by the instability nature of PETuning methods, has led to unreliable conclusions. When being compared under a truly fair evaluation protocol, PETuning cannot yield consistently competitive performance while finetuning remains to be the best-performing method in medium- and high-resource settings. We delve deeper into the cause of the instability and observed that the number of trainable parameters and training iterations are two main factors: reducing trainable parameters and prolonging training iterations may lead to higher stability in PETuning methods.", + "author": "Guanzheng Chen; Fangyu Liu; Zaiqiao Meng; Shangsong Liang", + "authorids": "/g/guanzheng-chen/; /f/fangyu-liu/; /z/zaiqiao-meng/; /s/shangsong-liang/", + "bibtex": "@inproceedings{chen-etal-2022-revisiting,\n title = \"Revisiting Parameter-Efficient Tuning: Are We Really There Yet?\",\n author = \"Chen, Guanzheng and\n Liu, Fangyu and\n Meng, Zaiqiao and\n Liang, Shangsong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.168/\",\n doi = \"10.18653/v1/2022.emnlp-main.168\",\n pages = \"2612--2626\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.168.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.168/", + "pdf_size": 1203764, + "gs_citation": 100, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=407971608672321475&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Sun Yat-sen University; University of Cambridge; University of Glasgow; Mohamed bin Zayed University of Artificial Intelligence", + "aff_domain": 
"gmail.com;cam.ac.uk;glasgow.ac.uk;gmail.com", + "email": "gmail.com;cam.ac.uk;glasgow.ac.uk;gmail.com", + "github": "https://github.com/guanzhchen/PETuning", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Sun Yat-sen University;University of Cambridge;University of Glasgow;Mohamed bin Zayed University of Artificial Intelligence", + "aff_unique_dep": ";;;", + "aff_unique_url": "http://www.sysu.edu.cn/;https://www.cam.ac.uk;https://www.gla.ac.uk;https://www.mbzuai.ac.ae", + "aff_unique_abbr": "SYSU;Cambridge;Glasgow;MBZUAI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1;1;2", + "aff_country_unique": "China;United Kingdom;United Arab Emirates" + }, + { + "id": "2022.emnlp-main.205", + "title": "Revisiting Pre-trained Language Models and their Evaluation for Arabic Natural Language Processing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "There is a growing body of work in recent years to develop pre-trained language models (PLMs) for the Arabic language. This work addresses two major problems in existing Arabic PLMs that limit the progress of the Arabic NLU and NLG fields. First, existing Arabic PLMs are not well-explored and their pre-training can be improved significantly using a more methodical approach. Second, there is a lack of systematic and reproducible evaluation of these models in the literature. We revisit both the pre-training and evaluation of Arabic PLMs. In terms of pre-training, we explore the impact of the quality of the pretraining data, the size of the model, and the incorporation of character-level information on Arabic PLM. As a result, we release three new Arabic BERT-style models ( JABER, Char-JABER, and SABER), and two T5-style models (AT5S and AT5B). 
In terms of evaluation, we conduct a comprehensive empirical study to systematically evaluate the performance of existing state-of-the-art models on ALUE, a leaderboard-powered benchmark for Arabic NLU tasks, and on a subset of the Arabic generative tasks. We show that our models significantly outperform existing Arabic PLMs and achieve a new state-of-the-art performance on discriminative and generative Arabic NLU and NLG tasks. Our models and source code to reproduce results will be made available upon acceptance.", + "author": "Abbas Ghaddar; Yimeng Wu; Sunyam Bagga; Ahmad Rashid; Khalil Bibi; Mehdi Rezagholizadeh; Chao Xing; Yasheng Wang; Xinyu Duan; Zhefeng Wang; Baoxing Huai; Xin Jiang; Qun Liu; Phillippe Langlais", + "authorids": "/a/abbas-ghaddar/; /y/yimeng-wu/; /s/sunyam-bagga/; /a/ahmad-rashid/; /k/khalil-bibi/; /m/mehdi-rezagholizadeh/; /c/chao-xing/; /y/yasheng-wang/; /x/xinyu-duan/; /z/zhefeng-wang/; /b/baoxing-huai/; /x/xin-jiang/; /q/qun-liu/; /p/philippe-langlais/", + "bibtex": "@inproceedings{ghaddar-etal-2022-revisiting,\n title = \"Revisiting Pre-trained Language Models and their Evaluation for {A}rabic Natural Language Processing\",\n author = \"Ghaddar, Abbas and\n Wu, Yimeng and\n Bagga, Sunyam and\n Rashid, Ahmad and\n Bibi, Khalil and\n Rezagholizadeh, Mehdi and\n Xing, Chao and\n Wang, Yasheng and\n Duan, Xinyu and\n Wang, Zhefeng and\n Huai, Baoxing and\n Jiang, Xin and\n Liu, Qun and\n Langlais, Phillippe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.205/\",\n doi = \"10.18653/v1/2022.emnlp-main.205\",\n pages = \"3135--3151\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.205.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.205/", + "pdf_size": 301967, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2099685346786823969&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; Huawei Cloud Computing Technologies Co., Ltd; Huawei Cloud Computing Technologies Co., Ltd; Huawei Cloud Computing Technologies Co., Ltd; Huawei Technologies Co., Ltd.; Huawei Technologies Co., Ltd.; RALI/DIRO, Universit\u00e9 de Montr\u00e9al, Canada", + "aff_domain": "huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;iro.umontreal.ca", + "email": "huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;huawei.com;iro.umontreal.ca", + "github": "", + "project": "", + "author_num": 14, + "aff_unique_index": "0;0;0;0;0;0;0;0;1;1;1;0;0;2", + "aff_unique_norm": "Huawei Technologies;Huawei Cloud Computing Technologies Co., Ltd;Universit\u00e9 de Montr\u00e9al", + "aff_unique_dep": ";;RALI/DIRO", + "aff_unique_url": "https://www.huawei.com;https://www.huawei.com/en/cloud;https://www.umontreal.ca", + "aff_unique_abbr": "Huawei;Huawei Cloud;UdeM", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Montr\u00e9al", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.534", + "title": "Revisiting Transformer-based Models for Long Document Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The recent literature in text classification is biased towards short text sequences (e.g., sentences or 
paragraphs). In real-world applications, multi-page multi-paragraph documents are common and they cannot be efficiently encoded by vanilla Transformer-based models. We compare different Transformer-based Long Document Classification (TrLDC) approaches that aim to mitigate the computational overhead of vanilla transformers to encode much longer text, namely sparse attention and hierarchical encoding methods.We examine several aspects of sparse attention (e.g., size of local attention window, use of global attention) and hierarchical (e.g., document splitting strategy) transformers on four document classification datasets covering different domains. We observe a clear benefit from being able to process longer text, and, based on our results, we derive practical advice of applying Transformer-based models on long document classification tasks.", + "author": "Xiang Dai; Ilias Chalkidis; Sune Darkner; Desmond Elliott", + "authorids": "/x/xiang-dai/; /i/ilias-chalkidis/; /s/sune-darkner/; /d/desmond-elliott/", + "bibtex": "@inproceedings{dai-etal-2022-revisiting,\n title = \"Revisiting Transformer-based Models for Long Document Classification\",\n author = \"Dai, Xiang and\n Chalkidis, Ilias and\n Darkner, Sune and\n Elliott, Desmond\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.534/\",\n doi = \"10.18653/v1/2022.findings-emnlp.534\",\n pages = \"7212--7230\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.534.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.534/", + "pdf_size": 547923, + "gs_citation": 90, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=61034637167531933&as_sdt=5,33&sciodt=0,33&hl=en", + 
"gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/coastalcph/trldc", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-industry.57", + "title": "Revisiting and Advancing Chinese Natural Language Understanding with Accelerated Heterogeneous Knowledge Pre-training", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Recently, knowledge-enhanced pre-trained language models (KEPLMs) improve context-aware representations via learning from structured relations in knowledge bases, and/or linguistic knowledge from syntactic or dependency analysis. Unlike English, there is a lack of high-performing open-source Chinese KEPLMs in the natural language processing (NLP) community to support various language understanding applications. In this paper, we revisit and advance the development of Chinese natural language understanding with a series of novel Chinese KEPLMs released in various parameter sizes, namely CKBERT (Chinese knowledge-enhanced BERT). Specifically, both relational and linguistic knowledge is effectively injected into CKBERT based on two novel pre-training tasks, i.e., linguistic-aware masked language modeling and contrastive multi-hop relation modeling. Based on the above two pre-training paradigms and our in-house implemented TorchAccelerator, we have pre-trained base (110M), large (345M) and huge (1.3B) versions of CKBERT efficiently on GPU clusters. 
Experiments demonstrate that CKBERT consistently outperforms strong baselines for Chinese over various benchmark NLP tasks and in terms of different model sizes.", + "author": "Taolin Zhang; Junwei Dong; Jianing Wang; Chengyu Wang; Ang Wang; Yinghui Liu; Jun Huang; Yong Li; Xiaofeng He", + "authorids": "/t/taolin-zhang/; /j/junwei-dong/; /j/jianing-wang/; /c/chengyu-wang/; /a/ang-wang/; /y/yinghui-liu/; /j/jun-huang/; /y/yong-li/; /x/xiaofeng-he/", + "bibtex": "@inproceedings{zhang-etal-2022-revisiting,\n title = \"Revisiting and Advancing {C}hinese Natural Language Understanding with Accelerated Heterogeneous Knowledge Pre-training\",\n author = \"Zhang, Taolin and\n Dong, Junwei and\n Wang, Jianing and\n Wang, Chengyu and\n Wang, Ang and\n Liu, Yinghui and\n Huang, Jun and\n Li, Yong and\n He, Xiaofeng\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.57/\",\n doi = \"10.18653/v1/2022.emnlp-industry.57\",\n pages = \"560--570\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.57.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.57/", + "pdf_size": 551551, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16135964434893171763&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "East China Normal University+Alibaba Group; Alibaba Group+Chongqing University; East China Normal University+Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; East China Normal University", + "aff_domain": "gmail.com;alibaba-inc.com; ; ; ; ; ; ; ", + "email": "gmail.com;alibaba-inc.com; ; ; ; ; ; ; ", + "github": "https://github.com/alibaba/EasyNLP", + "project": "", + 
"author_num": 9, + "aff_unique_index": "0+1;1+2;0+1;1;1;1;1;1;0", + "aff_unique_norm": "East China Normal University;Alibaba Group;Chongqing University", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.alibaba.com;https://www.cqu.edu.cn", + "aff_unique_abbr": "ECNU;Alibaba;CQU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.510", + "title": "Revisiting the Roles of \u201cText\u201d in Text Games", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text games present opportunities for natural language understanding (NLU) methods to tackle reinforcement learning (RL) challenges. However, recent work has questioned the necessity of NLU by showing random text hashes could perform decently. In this paper, we pursue a fine-grained investigation into the roles of text in the face of different RL challenges, and reconcile that semantic and non-semantic language representations could be complementary rather than contrasting. Concretely, we propose a simple scheme to extract relevant contextual information into an approximate state hash as extra input for an RNN-based text agent. Such a lightweight plug-in achieves competitive performance with state-of-the-art text agents using advanced NLU techniques such as knowledge graph and passage retrieval, suggesting non-NLU methods might suffice to tackle the challenge of partial observability. However, if we remove RNN encoders and use approximate or even ground-truth state hash alone, the model performs miserably, which confirms the importance of semantic function approximation to tackle the challenge of combinatorially large observation and action spaces. 
Our findings and analysis provide new insights for designing better text game task setups and agents.", + "author": "Yi Gu; Shunyu Yao; Chuang Gan; Josh Tenenbaum; Mo Yu", + "authorids": "/y/yi-gu/; /s/shunyu-yao/; /c/chuang-gan/; /j/josh-tenenbaum/; /m/mo-yu/", + "bibtex": "@inproceedings{gu-etal-2022-revisiting,\n title = \"Revisiting the Roles of {\\textquotedblleft}Text{\\textquotedblright} in Text Games\",\n author = \"Gu, Yi and\n Yao, Shunyu and\n Gan, Chuang and\n Tenenbaum, Josh and\n Yu, Mo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.510/\",\n doi = \"10.18653/v1/2022.findings-emnlp.510\",\n pages = \"6867--6876\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.510.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.510/", + "pdf_size": 517014, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=424529921478945064&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.146", + "title": "Rich Knowledge Sources Bring Complex Knowledge Conflicts: Recalibrating Models to Reflect Conflicting Evidence", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Question answering models can use rich knowledge sources \u2014 up to one hundred retrieved passages and parametric knowledge in the large-scale language model (LM). Prior work assumes information in such knowledge sources is consistent with each other, paying little attention to how models blend information stored in their LM parameters with that from retrieved evidence documents. 
In this paper, we simulate knowledge conflicts (i.e., where parametric knowledge suggests one answer and different passages suggest different answers) and examine model behaviors. We find retrieval performance heavily impacts which sources models rely on, and current models mostly rely on non-parametric knowledgein their best-performing settings. We discover a troubling trend that contradictions among knowledge sources affect model confidence only marginally. To address this issue, we present a new calibration study, where models are discouraged from presenting any single answer when presented with multiple conflicting answer candidates in retrieved evidences.", + "author": "Hung-Ting Chen; Michael Zhang; Eunsol Choi", + "authorids": "/h/hung-ting-chen/; /m/michael-zhang/; /e/eunsol-choi/", + "bibtex": "@inproceedings{chen-etal-2022-rich,\n title = \"Rich Knowledge Sources Bring Complex Knowledge Conflicts: Recalibrating Models to Reflect Conflicting Evidence\",\n author = \"Chen, Hung-Ting and\n Zhang, Michael and\n Choi, Eunsol\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.146/\",\n doi = \"10.18653/v1/2022.emnlp-main.146\",\n pages = \"2292--2307\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.146.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.146/", + "pdf_size": 530985, + "gs_citation": 81, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15774705348086250281&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The 
University of Texas at Austin", + "aff_domain": "utexas.edu;utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.256", + "title": "RoChBert: Towards Robust BERT Fine-tuning for Chinese", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite of the superb performance on a wide range of tasks, pre-trained language models (e.g., BERT) have been proved vulnerable to adversarial texts. In this paper, we present RoChBERT, a framework to build more Robust BERT-based models by utilizing a more comprehensive adversarial graph to fuse Chinese phonetic and glyph features into pre-trained representations during fine-tuning. Inspired by curriculum learning, we further propose to augment the training dataset with adversarial texts in combination with intermediate samples. Extensive experiments demonstrate that RoChBERT outperforms previous methods in significant ways: (i) robust \u2013 RoChBERT greatly improves the model robustness without sacrificing accuracy on benign texts. 
Specifically, the defense lowers the success rates of unlimited and limited attacks by 59.43% and 39.33% respectively, while remaining accuracy of 93.30%; (ii) flexible \u2013 RoChBERT can easily extend to various language models to solve different downstream tasks with excellent performance; and (iii) efficient \u2013 RoChBERT can be directly applied to the fine-tuning stage without pre-training language model from scratch, and the proposed data augmentation method is also low-cost.", + "author": "Zihan Zhang; Jinfeng Li; Ning Shi; Bo Yuan; Xiangyu Liu; Rong Zhang; Hui Xue; Donghong Sun; Chao Zhang", + "authorids": "/z/zihan-zhang/; /j/jinfeng-li/; /n/ning-shi/; /b/bo-yuan/; /x/xiangyu-liu/; /r/rong-zhang/; /h/hui-xue/; /d/donghong-sun/; /c/chao-zhang-tu/", + "bibtex": "@inproceedings{zhang-etal-2022-rochbert,\n title = \"{R}o{C}h{B}ert: Towards Robust {BERT} Fine-tuning for {C}hinese\",\n author = \"Zhang, Zihan and\n Li, Jinfeng and\n Shi, Ning and\n Yuan, Bo and\n Liu, Xiangyu and\n Zhang, Rong and\n Xue, Hui and\n Sun, Donghong and\n Zhang, Chao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.256/\",\n doi = \"10.18653/v1/2022.findings-emnlp.256\",\n pages = \"3502--3516\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.256.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.256/", + "pdf_size": 862523, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5204974464481774557&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Tsinghua University+BNRist+Zhongguancun Lab; Alibaba Group; Alberta Machine Intelligence Institute+Dept. 
of Computing Science, University of Alberta; Alibaba Group; Alibaba Group; Alibaba Group; Alibaba Group; Tsinghua University; Tsinghua University", + "aff_domain": "tsinghua.org.cn;alibaba-inc.com;ualberta.ca;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;tsinghua.edu.cn;tsinghua.edu.cn", + "email": "tsinghua.org.cn;alibaba-inc.com;ualberta.ca;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;tsinghua.edu.cn;tsinghua.edu.cn", + "github": "https://github.com/zzh-z/RoChBERT", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1+2;3;4+5;3;3;3;3;0;0", + "aff_unique_norm": "Tsinghua University;BNRist;Zhongguancun Lab;Alibaba Group;Alberta Machine Intelligence Institute;University of Alberta", + "aff_unique_dep": ";;;;;Dept. of Computing Science", + "aff_unique_url": "https://www.tsinghua.edu.cn;;;https://www.alibaba.com;https://www.ami.alberta.ca;https://www.ualberta.ca", + "aff_unique_abbr": "THU;;;Alibaba;AMII;UAlberta", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;2+2;0;0;0;0;0;0", + "aff_country_unique": "China;;Canada" + }, + { + "id": "2022.emnlp-main.215", + "title": "Robots-Dont-Cry: Understanding Falsely Anthropomorphic Utterances in Dialog Systems", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Dialog systems are often designed or trained to output human-like responses. However, some responses may be impossible for a machine to truthfully say (e.g. \u201cthat movie made me cry\u201d). Highly anthropomorphic responses might make users uncomfortable or implicitly deceive them into thinking they are interacting with a human. We collect human ratings on the feasibility of approximately 900 two-turn dialogs sampled from 9 diverse data sources. Ratings are for two hypothetical machine embodiments: a futuristic humanoid robot and a digital assistant. 
We find that for some data-sources commonly used to train dialog systems, 20-30% of utterances are not viewed as possible for a machine. Rating is marginally affected by machine embodiment. We explore qualitative and quantitative reasons for these ratings. Finally, we build classifiers and explore how modeling configuration might affect output permissibly, and discuss implications for building less falsely anthropomorphic dialog systems.", + "author": "David Gros; Yu Li; Zhou Yu", + "authorids": "/d/david-gros/; /y/yu-li/; /z/zhou-yu/", + "bibtex": "@inproceedings{gros-etal-2022-robots,\n title = \"Robots-Dont-Cry: Understanding Falsely Anthropomorphic Utterances in Dialog Systems\",\n author = \"Gros, David and\n Li, Yu and\n Yu, Zhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.215/\",\n doi = \"10.18653/v1/2022.emnlp-main.215\",\n pages = \"3266--3284\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.215.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.215/", + "pdf_size": 6929622, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=388579088136407761&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of California, Davis; Columbia University; Columbia University", + "aff_domain": "ucdavis.edu;columbia.edu;columbia.edu", + "email": "ucdavis.edu;columbia.edu;columbia.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;1", + "aff_unique_norm": "University of California, Davis;Columbia University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucdavis.edu;https://www.columbia.edu", + "aff_unique_abbr": "UC 
Davis;Columbia", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Davis;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.460", + "title": "Robust Question Answering against Distribution Shifts with Test-Time Adaption: An Empirical Study", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A deployed question answering (QA) model can easily fail when the test data has a distribution shift compared to the training data. Robustness tuning (RT) methods have been widely studied to enhance model robustness against distribution shifts before model deployment. However, can we improve a model after deployment? To answer this question, we evaluate test-time adaptation (TTA) to improve a model after deployment. We first introduce ColdQA, a unified evaluation benchmark for robust QA against text corruption and changes in language and domain. We then evaluate previous TTA methods on ColdQA and compare them to RT methods. We also propose a novel TTA method called online imitation learning (OIL). Through extensive experiments, we find that TTA is comparable to RT methods, and applying TTA after RT can significantly boost the performance on ColdQA. 
Our proposed OIL improves TTA to be more robust to variation in hyper-parameters and test distributions over time.", + "author": "Hai Ye; Yuyang Ding; Juntao Li; Hwee Tou Ng", + "authorids": "/h/hai-ye/; /y/yuyang-ding/; /j/juntao-li/; /h/hwee-tou-ng/", + "bibtex": "@inproceedings{ye-etal-2022-robust,\n title = \"Robust Question Answering against Distribution Shifts with Test-Time Adaption: An Empirical Study\",\n author = \"Ye, Hai and\n Ding, Yuyang and\n Li, Juntao and\n Ng, Hwee Tou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.460/\",\n doi = \"10.18653/v1/2022.findings-emnlp.460\",\n pages = \"6179--6192\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.460.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.460/", + "pdf_size": 735205, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6435996673152654940&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, National University of Singapore; Soochow University, China; Soochow University, China; Department of Computer Science, National University of Singapore", + "aff_domain": "comp.nus.edu.sg;gmail.com;suda.edu.cn;comp.nus.edu.sg", + "email": "comp.nus.edu.sg;gmail.com;suda.edu.cn;comp.nus.edu.sg", + "github": "https://github.com/oceanypt/coldqa-tta", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "National University of Singapore;Soochow University", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.nus.edu.sg;https://www.soochow.edu.cn", + "aff_unique_abbr": "NUS;Soochow U", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;0", + "aff_country_unique": "Singapore;China" + }, + { + "id": "2022.findings-emnlp.88", + "title": "Robust Task-Oriented Dialogue Generation with Contrastive Pre-training and Adversarial Filtering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Data artifacts incentivize machine learning models to learn non-transferable generalizations by taking advantage of shortcuts in the data, andthere is growing evidence that data artifacts play a role for the strong results that deep learning models achieve in recent natural language processing benchmarks.In this paper, we focus on task-oriented dialogue and investigate whether popular datasets such as MultiWOZ contain such data artifacts.We found that by only keeping frequent phrases in the trainingexamples, state-of-the-art models perform similarly compared to the variant trained with full data, suggesting they exploit these spurious correlationsto solve the task. Motivated by this, we propose a contrastive learning based framework to encourage the model to ignore these cues and focus on learning generalisable patterns. We also experiment with adversarial filtering to remove easy training instances so that the model would focus on learning from the harder instances. 
We conduct a number of generalization experiments \u2014 e.g., cross-domain/dataset and adversarial tests \u2014 to assess the robustness of our approach and found that it works exceptionally well.", + "author": "Shiquan Yang; Xinting Huang; Jey Han Lau; Sarah Erfani", + "authorids": "/s/shiquan-yang/; /x/xinting-huang/; /j/jey-han-lau/; /s/sarah-erfani/", + "bibtex": "@inproceedings{yang-etal-2022-robust,\n title = \"Robust Task-Oriented Dialogue Generation with Contrastive Pre-training and Adversarial Filtering\",\n author = \"Yang, Shiquan and\n Huang, Xinting and\n Lau, Jey Han and\n Erfani, Sarah\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.88/\",\n doi = \"10.18653/v1/2022.findings-emnlp.88\",\n pages = \"1220--1234\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.88.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.88/", + "pdf_size": 609153, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18281253051687542794&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "The University of Melbourne; Tencent AI Lab; The University of Melbourne; The University of Melbourne", + "aff_domain": "student.unimelb.edu.au;tencent.com;unimelb.edu.au;unimelb.edu.au", + "email": "student.unimelb.edu.au;tencent.com;unimelb.edu.au;unimelb.edu.au", + "github": "https://github.com/shiquanyang/Robust-Dialogue", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Melbourne;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": "https://www.unimelb.edu.au;https://ai.tencent.com", + "aff_unique_abbr": "UniMelb;Tencent AI Lab", + 
"aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Australia;China" + }, + { + "id": "2022.emnlp-main.653", + "title": "RobustLR: A Diagnostic Benchmark for Evaluating Logical Robustness of Deductive Reasoners", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformers have been shown to be able to perform deductive reasoning on inputs containing rules and statements written in the English natural language. However, it is unclear if these models indeed follow rigorous logical reasoning to arrive at the prediction or rely on spurious correlation patterns in making decisions. A strong deductive reasoning model should consistently understand the semantics of different logical operators. To this end, we present RobustLR, a diagnostic benchmark that evaluates the robustness of language models to minimal logical edits in the inputs and different logical equivalence conditions. In our experiments with RoBERTa, T5, and GPT3 we show that the models trained on deductive reasoning datasets do not perform consistently on the RobustLR test set, thus showing that the models are not robust to our proposed logical perturbations. Further, we observe that the models find it especially hard to learn logical negation operators. Our results demonstrate the shortcomings of current language models in logical reasoning and call for the development of better inductive biases to teach the logical semantics to language models. 
All the datasets and code base have been made publicly available.", + "author": "Soumya Sanyal; Zeyi Liao; Xiang Ren", + "authorids": "/s/soumya-sanyal/; /z/zeyi-liao/; /x/xiang-ren/", + "bibtex": "@inproceedings{sanyal-etal-2022-robustlr,\n title = \"{R}obust{LR}: A Diagnostic Benchmark for Evaluating Logical Robustness of Deductive Reasoners\",\n author = \"Sanyal, Soumya and\n Liao, Zeyi and\n Ren, Xiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.653/\",\n doi = \"10.18653/v1/2022.emnlp-main.653\",\n pages = \"9614--9631\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.653.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.653/", + "pdf_size": 860174, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2791735711403170337&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of Southern California; University of Southern California; University of Southern California", + "aff_domain": "usc.edu;usc.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu", + "github": "https://github.com/INK-USC/RobustLR", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Southern California", + "aff_unique_dep": "", + "aff_unique_url": "https://www.usc.edu", + "aff_unique_abbr": "USC", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.783", + "title": "Robustifying Sentiment Classification by Maximally Exploiting Few Counterfactuals", + "track": "main", + "status": "Main", + "award": false, + 
"abstract": "For text classification tasks, finetuned language models perform remarkably well. Yet, they tend to rely on spurious patterns in training data, thus limiting their performance on out-of-distribution (OOD) test data. Among recent models aiming to avoid this spurious pattern problem, adding extra counterfactual samples to the training data has proven to be very effective. Yet, counterfactual data generation is costly since it relies on human annotation. Thus, we propose a novel solution that only requires annotation of a small fraction (e.g., 1%) of the original training data, and uses automatic generation of extra counterfactuals in an encoding vector space. We demonstrate the effectiveness of our approach in sentiment classification, using IMDb data for training and other sets for OOD tests (i.e., Amazon, SemEval and Yelp). We achieve noticeable accuracy improvements by adding only 1% manual counterfactuals: +3% compared to adding +100% in-distribution training samples, +1.3% compared to alternate counterfactual approaches.", + "author": "Maarten De Raedt; Fr\u00e9deric Godin; Chris Develder; Thomas Demeester", + "authorids": "/m/maarten-de-raedt/; /f/frederic-godin/; /c/chris-develder/; /t/thomas-demeester/", + "bibtex": "@inproceedings{de-raedt-etal-2022-robustifying,\n title = \"Robustifying Sentiment Classification by Maximally Exploiting Few Counterfactuals\",\n author = \"De Raedt, Maarten and\n Godin, Fr{\\'e}deric and\n Develder, Chris and\n Demeester, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.783/\",\n doi = \"10.18653/v1/2022.emnlp-main.783\",\n pages = \"11386--11400\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.783.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.783/", + "pdf_size": 981863, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16119072647001534029&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 8, + "aff": "Sinch Chatlayer+Ghent University; Sinch Chatlayer; Ghent University; Ghent University", + "aff_domain": "ugent.be;sinch.com;ugent.be;ugent.be", + "email": "ugent.be;sinch.com;ugent.be;ugent.be", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;1", + "aff_unique_norm": "Sinch;Ghent University", + "aff_unique_dep": "Chatlayer;", + "aff_unique_url": "https://www.sinch.com;https://www.ugent.be/en", + "aff_unique_abbr": ";UGent", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0;1;1", + "aff_country_unique": "Sweden;Belgium" + }, + { + "id": "2022.emnlp-main.116", + "title": "Robustness of Demonstration-based Learning Under Limited Data Scenario", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Demonstration-based learning has shown great potential in stimulating pretrained language models\u2019 ability under limited data scenario. Simply augmenting the input with some demonstrations can significantly improve performance on few-shot NER. However, why such demonstrations are beneficial for the learning process remains unclear since there is no explicit alignment between the demonstrations and the predictions. 
In this paper, we design pathological demonstrations by gradually removing intuitively useful information from the standard ones to take a deep dive of the robustness of demonstration-based sequence labeling and show that (1) demonstrations composed of random tokens still make the model a better few-shot learner; (2) the length of random demonstrations and the relevance of random tokens are the main factors affecting the performance; (3) demonstrations increase the confidence of model predictions on captured superficial patterns. We have publicly released our code at https://github.com/SALT-NLP/RobustDemo.", + "author": "Hongxin Zhang; Yanzhe Zhang; Ruiyi Zhang; Diyi Yang", + "authorids": "/h/hongxin-zhang/; /y/yanzhe-zhang/; /r/ruiyi-zhang/; /d/diyi-yang/", + "bibtex": "@inproceedings{zhang-etal-2022-robustness,\n title = \"Robustness of Demonstration-based Learning Under Limited Data Scenario\",\n author = \"Zhang, Hongxin and\n Zhang, Yanzhe and\n Zhang, Ruiyi and\n Yang, Diyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.116/\",\n doi = \"10.18653/v1/2022.emnlp-main.116\",\n pages = \"1769--1782\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.116.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.116/", + "pdf_size": 1333488, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1345476272834567917&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Shanghai Jiao Tong University; Georgia Institute of Technology; Adobe Research; Stanford University", + "aff_domain": "sjtu.edu.cn;gatech.edu;adobe.com;cs.stanford.edu", + "email": 
"sjtu.edu.cn;gatech.edu;adobe.com;cs.stanford.edu", + "github": "https://github.com/SALT-NLP/RobustDemo", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "Shanghai Jiao Tong University;Georgia Institute of Technology;Adobe;Stanford University", + "aff_unique_dep": ";;Adobe Research;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.gatech.edu;https://research.adobe.com;https://www.stanford.edu", + "aff_unique_abbr": "SJTU;Georgia Tech;Adobe;Stanford", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;1;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.25", + "title": "Robustness of Fusion-based Multimodal Classifiers to Cross-Modal Content Dilutions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As multimodal learning finds applications in a wide variety of high-stakes societal tasks, investigating their robustness becomes important. Existing work has focused on understanding the robustness of vision-and-language models to imperceptible variations on benchmark tasks. In this work, we investigate the robustness of multimodal classifiers to cross-modal dilutions \u2013 a plausible variation. We develop a model that, given a multimodal (image + text) input, generates additional dilution text that (a) maintains relevance and topical coherence with the image and existing text, and (b) when added to the original text, leads to misclassification of the multimodal input. Via experiments on Crisis Humanitarianism and Sentiment Detection tasks, we find that the performance of task-specific fusion-based multimodal classifiers drops by 23.3% and 22.5%, respectively, in the presence of dilutions generated by our model. 
Metric-based comparisons with several baselines and human evaluations indicate that our dilutions show higher relevance and topical coherence, while simultaneously being more effective at demonstrating the brittleness of the multimodal classifiers. Our work aims to highlight and encourage further research on the robustness of deep multimodal models to realistic variations, especially in human-facing societal applications.", + "author": "Gaurav Verma; Vishwa Vinay; Ryan Rossi; Srijan Kumar", + "authorids": "/g/gaurav-verma/; /v/vishwa-vinay/; /r/ryan-rossi/; /s/srijan-kumar/", + "bibtex": "@inproceedings{verma-etal-2022-robustness,\n title = \"Robustness of Fusion-based Multimodal Classifiers to Cross-Modal Content Dilutions\",\n author = \"Verma, Gaurav and\n Vinay, Vishwa and\n Rossi, Ryan and\n Kumar, Srijan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.25/\",\n doi = \"10.18653/v1/2022.emnlp-main.25\",\n pages = \"360--374\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.25.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.25/", + "pdf_size": 16365682, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6638162601180014892&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.348", + "title": "RuCoLA: Russian Corpus of Linguistic Acceptability", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Linguistic acceptability (LA) attracts the attention of the research community due to its many uses, such as testing the grammatical 
knowledge of language models and filtering implausible texts with acceptability classifiers.However, the application scope of LA in languages other than English is limited due to the lack of high-quality resources.To this end, we introduce the Russian Corpus of Linguistic Acceptability (RuCoLA), built from the ground up under the well-established binary LA approach. RuCoLA consists of 9.8k in-domain sentences from linguistic publications and 3.6k out-of-domain sentences produced by generative models. The out-of-domain set is created to facilitate the practical use of acceptability for improving language generation.Our paper describes the data collection protocol and presents a fine-grained analysis of acceptability classification experiments with a range of baseline approaches.In particular, we demonstrate that the most widely used language models still fall behind humans by a large margin, especially when detecting morphological and semantic errors. We release RuCoLA, the code of experiments, and a public leaderboard to assess the linguistic competence of language models for Russian.", + "author": "Vladislav Mikhailov; Tatiana Shamardina; Max Ryabinin; Alena Pestova; Ivan Smurov; Ekaterina Artemova", + "authorids": "/v/vladislav-mikhailov/; /t/tatiana-shamardina/; /m/max-ryabinin/; /a/alena-pestova/; /i/ivan-smurov/; /e/ekaterina-artemova/", + "bibtex": "@inproceedings{mikhailov-etal-2022-rucola,\n title = \"{R}u{C}o{LA}: {R}ussian Corpus of Linguistic Acceptability\",\n author = \"Mikhailov, Vladislav and\n Shamardina, Tatiana and\n Ryabinin, Max and\n Pestova, Alena and\n Smurov, Ivan and\n Artemova, Ekaterina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.348/\",\n doi = \"10.18653/v1/2022.emnlp-main.348\",\n pages = \"5207--5227\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.348.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.348/", + "pdf_size": 387790, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15741719744117835803&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "SberDevices; ABBYY; HSE University+Yandex; HSE University; ABBYY; Huawei Noah\u2019s Ark Lab+Center for Information and Language Processing (CIS), MaiNLP lab, LMU Munich, Germany", + "aff_domain": "gmail.com; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ", + "github": "", + "project": "rucola-benchmark.com", + "author_num": 6, + "aff_unique_index": "0;1;2+3;2;1;4+5", + "aff_unique_norm": "SberDevices;ABBYY;Higher School of Economics;Yandex;Huawei;LMU Munich", + "aff_unique_dep": ";;;;Noah\u2019s Ark Lab;Center for Information and Language Processing (CIS)", + "aff_unique_url": "https://sberdevices.ru;https://www.abbyy.com;https://hse.ru;https://yandex.com;https://www.huawei.com;https://www.lmu.de", + "aff_unique_abbr": "SberDevices;ABBYY;HSE;Yandex;Huawei;LMU", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0;0;0+0;0;0;1+2", + "aff_country_unique": "Russia;China;Germany" + }, + { + "id": "2022.findings-emnlp.379", + "title": "SALTED: A Framework for SAlient Long-tail Translation Error Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Traditional machine translation (MT) metrics provide an average measure of translation quality that is insensitive to the long tail of behavioral problems. Examples include translation of numbers, physical units, dropped content and hallucinations. These errors, which occur rarely and unpredictably in Neural Machine Translation (NMT), greatly undermine the reliability of state-of-the-art MT systems. 
Consequently, it is important to have visibility into these problems during model development.Towards this end, we introduce SALTED, a specifications-based framework for behavioral testing of NMT models. At the core of our approach is the use of high-precision detectors that flag errors (or alternatively, verify output correctness) between a source sentence and a system output. These detectors provide fine-grained measurements of long-tail errors, providing a trustworthy view of problems that were previously invisible. We demonstrate that such detectors could be used not just to identify salient long-tail errors in MT systems, but also for higher-recall filtering of the training data, fixing targeted errors with model fine-tuning in NMT and generating novel data for metamorphic testing to elicit further bugs in models.", + "author": "Vikas Raunak; Matt Post; Arul Menezes", + "authorids": "/v/vikas-raunak/; /m/matt-post/; /a/arul-menezes/", + "bibtex": "@inproceedings{raunak-etal-2022-salted,\n title = \"{SALTED}: A Framework for {SA}lient Long-tail Translation Error Detection\",\n author = \"Raunak, Vikas and\n Post, Matt and\n Menezes, Arul\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.379/\",\n doi = \"10.18653/v1/2022.findings-emnlp.379\",\n pages = \"5163--5179\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.379.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.379/", + "pdf_size": 297213, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11365241683402607726&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + 
"project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.456", + "title": "SAT: Improving Semi-Supervised Text Classification with Simple Instance-Adaptive Self-Training", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Self-training methods have been explored in recent years and have exhibited great performance in improving semi-supervised learning. This work presents a simple instance-adaptive self-training method (SAT) for semi-supervised text classification. SAT first generates two augmented views for each unlabeled data, and then trains a meta learner to automatically identify the relative strength of augmentations based on the similarity between the original view and the augmented views. The weakly-augmented view is fed to the model to produce a pseudo-label and the strongly-augmented view is used to train the model to predict the same pseudo-label. We conducted extensive experiments and analyses on three text classification datasets and found that with varying sizes of labeled training data, SAT consistently shows competitive performance compared to existing semi-supervised learning methods.", + "author": "Hui Chen; Wei Han; Soujanya Poria", + "authorids": "/h/hui-chen/; /w/wei-han/; /s/soujanya-poria/", + "bibtex": "@inproceedings{chen-etal-2022-sat,\n title = \"{SAT}: Improving Semi-Supervised Text Classification with Simple Instance-Adaptive Self-Training\",\n author = \"Chen, Hui and\n Han, Wei and\n Poria, Soujanya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.456/\",\n doi = \"10.18653/v1/2022.findings-emnlp.456\",\n pages = \"6141--6146\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.456.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.456/", + "pdf_size": 638868, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1513317212154380177&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Singapore University of Technology and Design; Singapore University of Technology and Design; Singapore University of Technology and Design", + "aff_domain": "mymail.sutd.edu.sg;mymail.sutd.edu.sg;sutd.edu.sg", + "email": "mymail.sutd.edu.sg;mymail.sutd.edu.sg;sutd.edu.sg", + "github": "https://github.com/declare-lab/SAT.git", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Singapore University of Technology and Design", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sutd.edu.sg", + "aff_unique_abbr": "SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.emnlp-main.823", + "title": "SCROLLS: Standardized CompaRison Over Long Language Sequences", + "track": "main", + "status": "Main", + "award": false, + "abstract": "NLP benchmarks have largely focused on short texts, such as sentences and paragraphs, even though long texts comprise a considerable amount of natural language in the wild. We introduce SCROLLS, a suite of tasks that require reasoning over long texts. We examine existing long-text datasets, and handpick ones where the text is naturally long, while prioritizing tasks that involve synthesizing information across the input. SCROLLS contains summarization, question answering, and natural language inference tasks, covering multiple domains, including literature, science, business, and entertainment. Initial baselines, including Longformer Encoder-Decoder, indicate that there is ample room for improvement on SCROLLS. 
We make all datasets available in a unified text-to-text format and host a live leaderboard to facilitate research on model architecture and pretraining methods.", + "author": "Uri Shaham; Elad Segal; Maor Ivgi; Avia Efrat; Ori Yoran; Adi Haviv; Ankit Gupta; Wenhan Xiong; Mor Geva; Jonathan Berant; Omer Levy", + "authorids": "/u/uri-shaham/; /e/elad-segal/; /m/maor-ivgi/; /a/avia-efrat/; /o/ori-yoran/; /a/adi-haviv/; /a/ankit-gupta/; /w/wenhan-xiong/; /m/mor-geva/; /j/jonathan-berant/; /o/omer-levy/", + "bibtex": "@inproceedings{shaham-etal-2022-scrolls,\n title = \"{SCROLLS}: Standardized {C}ompa{R}ison Over Long Language Sequences\",\n author = \"Shaham, Uri and\n Segal, Elad and\n Ivgi, Maor and\n Efrat, Avia and\n Yoran, Ori and\n Haviv, Adi and\n Gupta, Ankit and\n Xiong, Wenhan and\n Geva, Mor and\n Berant, Jonathan and\n Levy, Omer\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.823/\",\n doi = \"10.18653/v1/2022.emnlp-main.823\",\n pages = \"12007--12021\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.823.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.823/", + "pdf_size": 274389, + "gs_citation": 145, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1988929490186029835&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv University; The Blavatnik School of Computer Science, Tel Aviv 
University; IBM Research; Meta AI; Allen Institute for AI; The Blavatnik School of Computer Science, Tel Aviv University; Meta AI", + "aff_domain": ";;;;;;;;;;", + "email": ";;;;;;;;;;", + "github": "", + "project": "https://www.scrolls-benchmark.com", + "author_num": 11, + "aff_unique_index": "0;0;0;0;0;0;1;2;3;0;2", + "aff_unique_norm": "Tel Aviv University;IBM;Meta Platforms, Inc.;Allen Institute for AI", + "aff_unique_dep": "Blavatnik School of Computer Science;IBM Research;Meta AI;", + "aff_unique_url": "https://www.tau.ac.il;https://www.ibm.com/research;https://meta.com;https://allenai.org", + "aff_unique_abbr": "TAU;IBM;Meta;AI2", + "aff_campus_unique_index": "0;0;0;0;0;0;0", + "aff_campus_unique": "Tel Aviv;", + "aff_country_unique_index": "0;0;0;0;0;0;1;1;1;0;1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.emnlp-main.365", + "title": "SEEN: Structured Event Enhancement Network for Explainable Need Detection of Information Recall Assistance", + "track": "main", + "status": "Main", + "award": false, + "abstract": "When recalling life experiences, people often forget or confuse life events, which necessitates information recall services. Previous work on information recall focuses on providing such assistance reactively, i.e., by retrieving the life event of a given query. Proactively detecting the need for information recall services is rarely discussed. In this paper, we use a human-annotated life experience retelling dataset to detect the right time to trigger the information recall service. We propose a pilot model\u2014structured event enhancement network (SEEN) that detects life event inconsistency, additional information in life events, and forgotten events. A fusing mechanism is also proposed to incorporate event graphs of stories and enhance the textual representations. To explain the need detection results, SEEN simultaneously provides support evidence by selecting the related nodes from the event graph. 
Experimental results show that SEEN achieves promising performance in detecting information needs. In addition, the extracted evidence can be served as complementary information to remind users what events they may want to recall.", + "author": "You-En Lin; An-Zi Yen; Hen-Hsen Huang; Hsin-Hsi Chen", + "authorids": "/y/you-en-lin/; /a/an-zi-yen/; /h/hen-hsen-huang/; /h/hsin-hsi-chen/", + "bibtex": "@inproceedings{lin-etal-2022-seen,\n title = \"{SEEN}: Structured Event Enhancement Network for Explainable Need Detection of Information Recall Assistance\",\n author = \"Lin, You-En and\n Yen, An-Zi and\n Huang, Hen-Hsen and\n Chen, Hsin-Hsi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.365/\",\n doi = \"10.18653/v1/2022.emnlp-main.365\",\n pages = \"5438--5451\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.365.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.365/", + "pdf_size": 535405, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10617082005805958174&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science and Information Engineering, National Taiwan University, Taiwan; Department of Computer Science, National Yang Ming Chiao Tung University, Taiwan; Institute of Information Science, Academia Sinica, Taiwan; Department of Computer Science and Information Engineering, National Taiwan University, Taiwan", + "aff_domain": "nlg.csie.ntu.edu.tw;nycu.edu.tw;iis.sinica.edu.tw;ntu.edu.tw", + "email": "nlg.csie.ntu.edu.tw;nycu.edu.tw;iis.sinica.edu.tw;ntu.edu.tw", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + 
"aff_unique_norm": "National Taiwan University;National Yang Ming Chiao Tung University;Academia Sinica", + "aff_unique_dep": "Department of Computer Science and Information Engineering;Department of Computer Science;Institute of Information Science", + "aff_unique_url": "https://www.ntu.edu.tw;https://www.nctu.edu.tw;https://www.sinica.edu.tw", + "aff_unique_abbr": "NTU;NYCU;AS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Taiwan, China" + }, + { + "id": "2022.emnlp-main.49", + "title": "SEM-F1: an Automatic Way for Semantic Evaluation of Multi-Narrative Overlap Summaries at Scale", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has introduced an important yet relatively under-explored NLP task called Semantic Overlap Summarization (SOS) that entails generating a summary from multiple alternative narratives which conveys the common information provided by those narratives. Previous work also published a benchmark dataset for this task by collecting 2,925 alternative narrative pairs from the web and manually annotating 411 different reference summaries by engaging human annotators. In this paper, we exclusively focus on the automated evaluation of the SOS task using the benchmark dataset. More specifically, we first use the popular ROUGE metric from text-summarization literature and conduct a systematic study to evaluate the SOS task. Our experiments discover that ROUGE is not suitable for this novel task and therefore, we propose a new sentence-level precision-recall style automated evaluation metric, called SEM-F1 (Semantic F1). It is inspired by the benefits of the sentence-wise annotation technique using overlap labels reported by the previous work. 
Our experiments show that the proposed SEM-F1 metric yields a higher correlation with human judgment and higher inter-rater agreement compared to the ROUGE metric.", + "author": "Naman Bansal; Mousumi Akter; Shubhra Kanti Karmaker Santu", + "authorids": "/n/naman-bansal/; /m/mousumi-akter/; /s/shubhra-kanti-karmaker-santu/", + "bibtex": "@inproceedings{bansal-etal-2022-sem,\n title = \"{SEM}-F1: an Automatic Way for Semantic Evaluation of Multi-Narrative Overlap Summaries at Scale\",\n author = \"Bansal, Naman and\n Akter, Mousumi and\n Karmaker Santu, Shubhra Kanti\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.49/\",\n doi = \"10.18653/v1/2022.emnlp-main.49\",\n pages = \"780--792\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.49.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.49/", + "pdf_size": 708372, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1711635260810410575&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University; Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University; Big Data Intelligence (BDI) Lab, Department of Computer Science and Software Engineering, College of Engineering, Auburn University", + "aff_domain": "auburn.edu;auburn.edu;auburn.edu", + "email": "auburn.edu;auburn.edu;auburn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Auburn University", + "aff_unique_dep": "Department of 
Computer Science and Software Engineering", + "aff_unique_url": "https://www.auburn.edu", + "aff_unique_abbr": "Auburn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.510", + "title": "SEMGraph: Incorporating Sentiment Knowledge and Eye Movement into Graph Model for Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This paper investigates the sentiment analysis task from a novel perspective by incorporating sentiment knowledge and eye movement into a graph architecture, aiming to draw the eye movement-based sentiment relationships for learning the sentiment expression of the context. To be specific, we first explore a linguistic probing eye movement paradigm to extract eye movement features based on the close relationship between linguistic features and the early and late processes of human reading behavior. Furthermore, to derive eye movement features with sentiment concepts, we devise a novel weighting strategy to integrate sentiment scores extracted from affective commonsense knowledge into eye movement features, called sentiment-eye movement weights. Then, the sentiment-eye movement weights are exploited to build the sentiment-eye movement guided graph (SEMGraph) model, so as to model the intricate sentiment relationships in the context. 
Experimental results on two sentiment analysis datasets with eye movement signals and three sentiment analysis datasets without eye movement signals show that the proposed SEMGraph achieves state-of-the-art performance, and can also be directly generalized to those sentiment analysis datasets without eye movement signals.", + "author": "Bingbing Wang; Bin Liang; Jiachen Du; Min Yang; Ruifeng Xu", + "authorids": "/b/bingbing-wang/; /b/bin-liang/; /j/jiachen-du/; /m/min-yang/; /r/ruifeng-xu/", + "bibtex": "@inproceedings{wang-etal-2022-semgraph,\n title = \"{SEMG}raph: Incorporating Sentiment Knowledge and Eye Movement into Graph Model for Sentiment Analysis\",\n author = \"Wang, Bingbing and\n Liang, Bin and\n Du, Jiachen and\n Yang, Min and\n Xu, Ruifeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.510/\",\n doi = \"10.18653/v1/2022.emnlp-main.510\",\n pages = \"7521--7531\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.510.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.510/", + "pdf_size": 743031, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17359568358205879081&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; Harbin Insitute of Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies; SIAT, Chinese Academy of Sciences, Shenzhen, China; Harbin Insitute of 
Technology, Shenzhen, China+Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "stu.hit.edu.cn;stu.hit.edu.cn;gmail.com;siat.ac.cn;hit.edu.cn", + "email": "stu.hit.edu.cn;stu.hit.edu.cn;gmail.com;siat.ac.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0+1;0+1;2;0+1+3", + "aff_unique_norm": "Harbin Institute of Technology;Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies;Shenzhen Institute of Advanced Technology;Peng Cheng Laboratory", + "aff_unique_dep": ";Provincial Key Laboratory of Novel Security Intelligence Technologies;;", + "aff_unique_url": "http://en.hhit.edu.cn/;;http://www.siat.ac.cn;", + "aff_unique_abbr": "HIT;;SIAT;", + "aff_campus_unique_index": "0;0;0;0;0+0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0+0;0+0;0;0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.761", + "title": "SHARE: a System for Hierarchical Assistive Recipe Editing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The large population of home cooks with dietary restrictions is under-served by existing cooking resources and recipe generation models. To help them, we propose the task of controllable recipe editing: adapt a base recipe to satisfy a user-specified dietary constraint. This task is challenging, and cannot be adequately solved with human-written ingredient substitution rules or existing end-to-end recipe generation models. We tackle this problem with SHARE: a System for Hierarchical Assistive Recipe Editing, which performs simultaneous ingredient substitution before generating natural-language steps using the edited ingredients. By decoupling ingredient and step editing, our step generator can explicitly integrate the available ingredients. 
Experiments on the novel RecipePairs dataset\u201483K pairs of similar recipes where each recipe satisfies one of seven dietary constraints\u2014demonstrate that SHARE produces convincing, coherent recipes that are appropriate for a target dietary constraint. We further show through human evaluations and real-world cooking trials that recipes edited by SHARE can be easily followed by home cooks to create appealing dishes.", + "author": "Shuyang Li; Yufei Li; Jianmo Ni; Julian McAuley", + "authorids": "/s/shuyang-li/; /y/yufei-li/; /j/jianmo-ni/; /j/julian-mcauley/", + "bibtex": "@inproceedings{li-etal-2022-share,\n title = \"{SHARE}: a System for Hierarchical Assistive Recipe Editing\",\n author = \"Li, Shuyang and\n Li, Yufei and\n Ni, Jianmo and\n McAuley, Julian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.761/\",\n doi = \"10.18653/v1/2022.emnlp-main.761\",\n pages = \"11077--11090\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.761.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.761/", + "pdf_size": 6600668, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15533717810443989368&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "UC San Diego; UC Riverside; UC San Diego; UC San Diego", + "aff_domain": "meta.com;ucr.edu;google.com;ucsd.edu", + "email": "meta.com;ucr.edu;google.com;ucsd.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of California, San Diego;University of California, Riverside", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ucsd.edu;https://www.ucr.edu", + 
"aff_unique_abbr": "UCSD;UCR", + "aff_campus_unique_index": "0;1;0;0", + "aff_campus_unique": "San Diego;Riverside", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.21", + "title": "SLATE: A Sequence Labeling Approach for Task Extraction from Free-form Inked Content", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "We present SLATE, a sequence labeling approach for extracting tasks from free-form content such as digitally handwritten (or \u201cinked\u201d) notes on a virtual whiteboard. Our approach allows us to create a single, low-latency model to simultaneously perform sentence segmentation and classification of these sentences into task/non-task sentences. SLATE greatly outperforms a baseline two-model (sentence segmentation followed by classification model) approach, achieving a task F1 score of 84.4%, a sentence segmentation (boundary similarity) score of 88.4% and three times lower latency compared to the baseline. Furthermore, we provide insights into tackling challenges of performing NLP on the inking domain. 
We release both our code and dataset for this novel task.", + "author": "Apurva Gandhi; Ryan Serrao; Biyi Fang; Gilbert Antonius; Jenna Hong; Tra My Nguyen; Sheng Yi; Ehi Nosakhare; Irene Shaffer; Soundararajan Srinivasan", + "authorids": "/a/apurva-gandhi/; /r/ryan-serrao/; /b/biyi-fang/; /g/gilbert-antonius/; /j/jenna-hong/; /t/tra-my-nguyen/; /s/sheng-yi/; /e/ehi-nosakhare/; /i/irene-shaffer/; /s/soundararajan-srinivasan/", + "bibtex": "@inproceedings{gandhi-etal-2022-slate,\n title = \"{SLATE}: A Sequence Labeling Approach for Task Extraction from Free-form Inked Content\",\n author = \"Gandhi, Apurva and\n Serrao, Ryan and\n Fang, Biyi and\n Antonius, Gilbert and\n Hong, Jenna and\n Nguyen, Tra My and\n Yi, Sheng and\n Nosakhare, Ehi and\n Shaffer, Irene and\n Srinivasan, Soundararajan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.21/\",\n doi = \"10.18653/v1/2022.emnlp-industry.21\",\n pages = \"206--217\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.21.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.21/", + "pdf_size": 2052318, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:FdUKQ7cgyb4J:scholar.google.com/&scioq=SLATE:+A+Sequence+Labeling+Approach+for+Task+Extraction+from+Free-form+Inked+Content&hl=en&as_sdt=0,5", + "gs_version_total": 3, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10 + }, + { + "id": "2022.emnlp-main.740", + "title": "SLICER: Sliced Fine-Tuning for Low-Resource Cross-Lingual Transfer for Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": 
"Large multilingual language models generally demonstrate impressive results in zero-shot cross-lingual transfer, yet often fail to successfully transfer to low-resource languages, even for token-level prediction tasks like named entity recognition (NER). In this work, we introduce a simple yet highly effective approach for improving zero-shot transfer for NER to low-resource languages. We observe that NER fine-tuning in the source language decontextualizes token representations, i.e., tokens increasingly attend to themselves. This increased reliance on token information itself, we hypothesize, triggers a type of overfitting to properties that NE tokens within the source languages share, but are generally not present in NE mentions of target languages. As a remedy, we propose a simple yet very effective sliced fine-tuning for NER (SLICER) that forces stronger token contextualization in the Transformer: we divide the transformed token representations and classifier into disjoint slices that are then independently classified during training. 
We evaluate SLICER on two standard benchmarks for NER that involve low-resource languages, WikiANN and MasakhaNER, and show that it (i) indeed reduces decontextualization (i.e., extent to which NE tokens attend to themselves), consequently (ii) yielding consistent transfer gains, especially prominent for low-resource target languages distant from the source language.", + "author": "Fabian David Schmidt; Ivan Vuli\u0107; Goran Glava\u0161", + "authorids": "/f/fabian-david-schmidt/; /i/ivan-vulic/; /g/goran-glavas/", + "bibtex": "@inproceedings{schmidt-etal-2022-slicer,\n title = \"{SLICER}: Sliced Fine-Tuning for Low-Resource Cross-Lingual Transfer for Named Entity Recognition\",\n author = \"Schmidt, Fabian David and\n Vuli{\\'c}, Ivan and\n Glava{\\v{s}}, Goran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.740/\",\n doi = \"10.18653/v1/2022.emnlp-main.740\",\n pages = \"10775--10785\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.740.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.740/", + "pdf_size": 684521, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14982750151803388151&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Center For Artificial Intelligence and Data Science, University of W\u00fcrzburg, Germany; Language Technology Lab, University of Cambridge, UK; Center For Artificial Intelligence and Data Science, University of W\u00fcrzburg, Germany", + "aff_domain": "uni-wuerzburg.de;cam.ac.uk;uni-wuerzburg.de", + "email": "uni-wuerzburg.de;cam.ac.uk;uni-wuerzburg.de", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + 
"aff_unique_norm": "University of W\u00fcrzburg;University of Cambridge", + "aff_unique_dep": "Center For Artificial Intelligence and Data Science;Language Technology Lab", + "aff_unique_url": "https://www.uni-wuerzburg.de;https://www.cam.ac.uk", + "aff_unique_abbr": ";Cambridge", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Cambridge", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Germany;United Kingdom" + }, + { + "id": "2022.emnlp-main.305", + "title": "SLING: Sino Linguistic Evaluation of Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "To understand what kinds of linguistic knowledge are encoded by pretrained Chinese language models (LMs), we introduce the benchmark of Sino LINGuistics (SLING), which consists of 38K minimal sentence pairs in Mandarin Chinese grouped into 9 high-level linguistic phenomena. Each pair demonstrates the acceptability contrast of a specific syntactic or semantic phenomenon (e.g., The keys are lost vs. The keys is lost), and an LM should assign lower perplexity to the acceptable sentence. In contrast to the CLiMP dataset (Xiang et al., 2021), which also contains Chinese minimal pairs and was created by translating the vocabulary of the English BLiMP dataset, the minimal pairs in SLING are derived primarily by applying syntactic and lexical transformations to naturally-occurring, linguist-annotated sentences from the Chinese Treebank 9.0, thus addressing severe issues in CLiMP\u2019s data generation process. We test 18 publicly available pretrained monolingual (e.g., BERT-base-zh, CPM) and multi-lingual (e.g., mT5, XLM) language models on SLING. Our experiments show that the average accuracy for LMs is far below human performance (69.7% vs. 97.1%), while BERT-base-zh achieves the highest accuracy (84.8%) of all tested LMs, even much larger ones. 
Additionally, we find that most LMs have a strong gender and number (singular/plural) bias, and they perform better on local phenomena than hierarchical ones.", + "author": "Yixiao Song; Kalpesh Krishna; Rajesh Bhatt; Mohit Iyyer", + "authorids": "/y/yixiao-song/; /k/kalpesh-krishna/; /r/rajesh-bhatt/; /m/mohit-iyyer/", + "bibtex": "@inproceedings{song-etal-2022-sling,\n title = \"{SLING}: {S}ino Linguistic Evaluation of Large Language Models\",\n author = \"Song, Yixiao and\n Krishna, Kalpesh and\n Bhatt, Rajesh and\n Iyyer, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.305/\",\n doi = \"10.18653/v1/2022.emnlp-main.305\",\n pages = \"4606--4634\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.305.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.305/", + "pdf_size": 1110169, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16286449432320702027&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Department of Linguistics, UMass Amherst; Manning College of Information and Computer Sciences, UMass Amherst; Department of Linguistics, UMass Amherst; Manning College of Information and Computer Sciences, UMass Amherst", + "aff_domain": "umass.edu;cs.umass.edu;umass.edu;cs.umass.edu", + "email": "umass.edu;cs.umass.edu;umass.edu;cs.umass.edu", + "github": "https://github.com/Yixiao-Song/SLING_Data_Code", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "Department of Linguistics", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + 
"aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.20", + "title": "SMARTAVE: Structured Multimodal Transformer for Product Attribute Value Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatic product attribute value extraction refers to the task of identifying values of an attribute from the product information. Product attributes are essential in improving online shopping experience for customers. Most existing methods focus on extracting attribute values from product title and description.However, in many real-world applications, a product is usually represented by multiple modalities beyond title and description, such as product specifications, text and visual information from the product image, etc. In this paper, we propose SMARTAVE, a Structure Mltimodal trAnsformeR for producT Attribute Value Extraction, which jointly encodes the structured product information from multiple modalities. Specifically, in SMARTAVE encoder, we introduce hyper-tokens to represent the modality-level information, and local-tokens to represent the original text and visual inputs. Structured attention patterns are designed among the hyper-tokens and local-tokens for learning effective product representation. The attribute values are then extracted based on the learned embeddings. We conduct extensive experiments on two multimodal product datasets. Experimental results demonstrate the superior performance of the proposed approach over several state-of-the-art methods. 
Ablation studies validate the effectiveness of the structured attentions in modeling the multimodal product information.", + "author": "Qifan Wang; Li Yang; Jingang Wang; Jitin Krishnan; Bo Dai; Sinong Wang; Zenglin Xu; Madian Khabsa; Hao Ma", + "authorids": "/q/qifan-wang/; /l/li-yang/; /j/jingang-wang/; /j/jitin-krishnan/; /b/bo-dai/; /s/sinong-wang/; /z/zenglin-xu/; /m/madian-khabsa/; /h/hao-ma/", + "bibtex": "@inproceedings{wang-etal-2022-smartave,\n title = \"{SMARTAVE}: Structured Multimodal Transformer for Product Attribute Value Extraction\",\n author = \"Wang, Qifan and\n Yang, Li and\n Wang, Jingang and\n Krishnan, Jitin and\n Dai, Bo and\n Wang, Sinong and\n Xu, Zenglin and\n Khabsa, Madian and\n Ma, Hao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.20/\",\n doi = \"10.18653/v1/2022.findings-emnlp.20\",\n pages = \"263--276\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.20.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.20/", + "pdf_size": 1816971, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14254547031181477291&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "", + "project": "", + "author_num": 9 + }, + { + "id": "2022.findings-emnlp.492", + "title": "SMASH: Improving SMAll Language Models\u2019 Few-SHot Ability with Prompt-Based Distillation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large-scale language models coupled with prompts have shown remarkable performance on few-shot learning. 
However, through systematic experiments, we find that the few-shot performance of small language models is poor, and using prompts on them brings fewer improvements than on larger ones. In this paper, we propose SMASH, an approach to improve SMAll language models\u2019 few-SHot ability by training on intermediate tasks before prompt-based fine-tuning on downstream tasks. We design intermediate tasks for sentence-pair tasks and sentiment classification tasks by creating training examples with prompt templates similar to downstream tasks using sentences sampled from a large-scale unsupervised corpus, and apply knowledge distillation to distill from outputs of larger pre-trained models as the training objective. We conduct extensive experiments and show that SMASH can make a 6-layer DistilRoBRETa-base achieve comparable performance on few-shot datasets with a 12-layer RoBERTa-base at a low cost.", + "author": "Yueqian Wang; Chang Liu; Kai Chen; Xi Wang; Dongyan Zhao", + "authorids": "/y/yueqian-wang/; /c/chang-liu/; /k/kai-chen/; /x/xi-wang/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{wang-etal-2022-smash,\n title = \"{SMASH}: Improving {SMA}ll Language Models' Few-{SH}ot Ability with Prompt-Based Distillation\",\n author = \"Wang, Yueqian and\n Liu, Chang and\n Chen, Kai and\n Wang, Xi and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.492/\",\n doi = \"10.18653/v1/2022.findings-emnlp.492\",\n pages = \"6608--6619\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.492.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.492/", + "pdf_size": 363158, + "gs_citation": 7, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=5557560986138319835&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Wangxuan Institute of Computer Technology, Peking University + School of Intelligence Science and Technology, Peking University + Center for Data Science, Peking University; Wangxuan Institute of Computer Technology, Peking University + Center for Data Science, Peking University + Institute for Artificial Intelligence, Peking University; School of Economics, Peking University; School of Economics, Peking University; Wangxuan Institute of Computer Technology, Peking University + Center for Data Science, Peking University + Institute for Artificial Intelligence, Peking University + State Key Laboratory of Media Convergence Production Technology and Systems", + "aff_domain": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "email": "pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn;pku.edu.cn", + "github": "https://github.com/yellow-binary-tree/SMASH", + "project": "", + "author_num": 5, + "aff_unique_index": "0+0+0;0+0+0;0;0;0+0+0+1", + "aff_unique_norm": "Peking University;State Key Laboratory of Media Convergence Production Technology and Systems", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;", + "aff_unique_url": "http://www.pku.edu.cn;", + "aff_unique_abbr": "PKU;", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0+0+0;0;0;0+0+0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.107", + "title": "SMSMix: Sense-Maintained Sentence Mixup for Word Sense Disambiguation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word Sense Disambiguation (WSD) is an NLP task aimed at determining the correct sense of a word in a sentence from discrete sense choices. 
Although current systems have attained unprecedented performances for such tasks, the nonuniform distribution of word senses during training generally results in systems performing poorly on rare senses. To this end, we consider data augmentation to increase the frequency of these least frequent senses (LFS) to reduce the distributional bias of senses during training. We propose Sense-Maintained Sentence Mixup (SMSMix), a novel word-level mixup method that maintains the sense of a target word. SMSMix smoothly blends two sentences using mask prediction while preserving the relevant span determined by saliency scores to maintain a specific word\u2019s sense. To the best of our knowledge, this is the first attempt to apply mixup in NLP while preserving the meaning of a specific word. With extensive experiments, we validate that our augmentation method can effectively give more information about rare senses during training with maintained target sense label.", + "author": "Hee Suk Yoon; Eunseop Yoon; John Harvill; Sunjae Yoon; Mark Hasegawa-Johnson; Chang Yoo", + "authorids": "/h/hee-suk-yoon/; /e/eunseop-yoon/; /j/john-harvill/; /s/sunjae-yoon/; /m/mark-hasegawa-johnson/; /c/chang-yoo/", + "bibtex": "@inproceedings{yoon-etal-2022-smsmix,\n title = \"{SMSM}ix: Sense-Maintained Sentence Mixup for Word Sense Disambiguation\",\n author = \"Yoon, Hee Suk and\n Yoon, Eunseop and\n Harvill, John and\n Yoon, Sunjae and\n Hasegawa-Johnson, Mark and\n Yoo, Chang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.107/\",\n doi = \"10.18653/v1/2022.findings-emnlp.107\",\n pages = \"1493--1502\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.107.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.107/", + "pdf_size": 2366417, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10664843493123918400&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Korea Advanced Institute of Science and Technology (KAIST)+University of Illinois at Urbana-Champaign (UIUC); Korea Advanced Institute of Science and Technology (KAIST)+University of Illinois at Urbana-Champaign (UIUC); University of Illinois at Urbana-Champaign (UIUC); Korea Advanced Institute of Science and Technology (KAIST); University of Illinois at Urbana-Champaign (UIUC); Korea Advanced Institute of Science and Technology (KAIST)", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;illinois.edu;kaist.ac.kr;illinois.edu;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;illinois.edu;kaist.ac.kr;illinois.edu;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;1;0;1;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.kaist.ac.kr;https://illinois.edu", + "aff_unique_abbr": "KAIST;UIUC", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0+1;0+1;1;0;1;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "2022.emnlp-main.571", + "title": "SMaLL-100: Introducing Shallow Multilingual Machine Translation Model for Low-Resource Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In recent years, multilingual machine translation models have achieved promising performance on low-resource language pairs by sharing information between similar languages, thus enabling zero-shot translation. 
To overcome the \u201ccurse of multilinguality\u201d, these models often opt for scaling up the number of parameters, which makes their use in resource-constrained environments challenging. We introduce SMaLL-100, a distilled version of the M2M-100(12B) model, a massively multilingual machine translation model covering 100 languages. We train SMaLL-100 with uniform sampling across all language pairs and therefore focus on preserving the performance of low-resource languages. We evaluate SMaLL-100 on different low-resource benchmarks: FLORES-101, Tatoeba, and TICO-19 and demonstrate that it outperforms previous massively multilingual models of comparable sizes (200-600M) while improving inference latency and memory usage. Additionally, our model achieves comparable results to M2M-100 (1.2B), while being 3.6x smaller and 4.3x faster at inference.", + "author": "Alireza Mohammadshahi; Vassilina Nikoulina; Alexandre Berard; Caroline Brun; James Henderson; Laurent Besacier", + "authorids": "/a/alireza-mohammadshahi/; /v/vassilina-nikoulina/; /a/alexandre-berard/; /c/caroline-brun/; /j/james-henderson/; /l/laurent-besacier/", + "bibtex": "@inproceedings{mohammadshahi-etal-2022-small,\n title = \"{SM}a{LL}-100: Introducing Shallow Multilingual Machine Translation Model for Low-Resource Languages\",\n author = \"Mohammadshahi, Alireza and\n Nikoulina, Vassilina and\n Berard, Alexandre and\n Brun, Caroline and\n Henderson, James and\n Besacier, Laurent\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.571/\",\n doi = \"10.18653/v1/2022.emnlp-main.571\",\n pages = \"8348--8359\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.571.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.571/", + "pdf_size": 397400, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3967073920063528568&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 6, + "aff": "NA VER LABS Europe+IDIAP Research Institute+EPFL; NA VER LABS Europe; NA VER LABS Europe; NA VER LABS Europe; IDIAP Research Institute; NA VER LABS Europe", + "aff_domain": "naverlabs.com;idiap.ch; ; ;idiap.ch; ", + "email": "naverlabs.com;idiap.ch; ; ;idiap.ch; ", + "github": "https://github.com/alirezamshi/small100", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0;0;0;1;0", + "aff_unique_norm": "NAVER LABS Europe;IDIAP Research Institute;Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.naverlabs.com/europe;https://www.idiap.ch;https://www.epfl.ch", + "aff_unique_abbr": "NAVER LABS Europe;;EPFL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+1;0;0;0;1;0", + "aff_country_unique": "Europe;Switzerland" + }, + { + "id": "2022.findings-emnlp.307", + "title": "SMiLE: Schema-augmented Multi-level Contrastive Learning for Knowledge Graph Link Prediction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Link prediction is the task of inferring missing links between entities in knowledge graphs. Embedding-based methods have shown effectiveness in addressing this problem by modeling relational patterns in triples. However, the link prediction task often requires contextual information in entity neighborhoods, while most existing embedding-based methods fail to capture it. Additionally, little attention is paid to the diversity of entity representations in different contexts, which often leads to false prediction results. 
In this situation, we consider that the schema of knowledge graph contains the specific contextual information, and it is beneficial for preserving the consistency of entities across contexts. In this paper, we propose a novel Schema-augmented Multi-level contrastive LEarning framework (SMiLE) to conduct knowledge graph link prediction. Specifically, we first exploit network schema as the prior constraint to sample negatives and pre-train our model by employing a multi-level contrastive learning method to yield both prior schema and contextual information. Then we fine-tune our model under the supervision of individual triples to learn subtler representations for link prediction. Extensive experimental results on four knowledge graph datasets with thorough analysis of each component demonstrate the effectiveness of our proposed framework against state-of-the-art baselines. The implementation of SMiLE is available at https://github.com/GKNL/SMiLE.", + "author": "Miao Peng; Ben Liu; Qianqian Xie; Wenjie Xu; Hua Wang; Min Peng", + "authorids": "/m/miao-peng/; /b/ben-liu/; /q/qianqian-xie/; /w/wenjie-xu/; /h/hua-wang/; /m/min-peng/", + "bibtex": "@inproceedings{peng-etal-2022-smile,\n title = \"{SM}i{LE}: Schema-augmented Multi-level Contrastive Learning for Knowledge Graph Link Prediction\",\n author = \"Peng, Miao and\n Liu, Ben and\n Xie, Qianqian and\n Xu, Wenjie and\n Wang, Hua and\n Peng, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.307/\",\n doi = \"10.18653/v1/2022.findings-emnlp.307\",\n pages = \"4165--4177\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.307.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.307/", + 
"pdf_size": 755870, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10280590721555578645&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, Wuhan University, China; School of Computer Science, Wuhan University, China; Department of Computer Science, The University of Manchester, United Kingdom; School of Computer Science, Wuhan University, China; Centre for Applied Informatics, Victoria University, Australia; School of Computer Science, Wuhan University, China", + "aff_domain": "whu.edu.cn;whu.edu.cn;manchester.ac.uk;whu.edu.cn;vu.edu.au;whu.edu.cn", + "email": "whu.edu.cn;whu.edu.cn;manchester.ac.uk;whu.edu.cn;vu.edu.au;whu.edu.cn", + "github": "https://github.com/GKNL/SMiLE", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;0;2;0", + "aff_unique_norm": "Wuhan University;The University of Manchester;Victoria University", + "aff_unique_dep": "School of Computer Science;Department of Computer Science;Centre for Applied Informatics", + "aff_unique_url": "http://www.whu.edu.cn;https://www.manchester.ac.uk;https://www.vu.edu.au", + "aff_unique_abbr": "WHU;UoM;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Wuhan;", + "aff_country_unique_index": "0;0;1;0;2;0", + "aff_country_unique": "China;United Kingdom;Australia" + }, + { + "id": "2022.emnlp-main.29", + "title": "SNaC: Coherence Error Detection for Narrative Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Progress in summarizing long texts is inhibited by the lack of appropriate evaluation frameworks. A long summary that appropriately covers the facets of that text must also present a coherent narrative, but current automatic and human evaluation methods fail to identify gaps in coherence. In this work, we introduce SNaC, a narrative coherence evaluation framework for fine-grained annotations of long summaries. 
We develop a taxonomy of coherence errors in generated narrative summaries and collect span-level annotations for 6.6k sentences across 150 book and movie summaries. Our work provides the first characterization of coherence errors generated by state-of-the-art summarization models and a protocol for eliciting coherence judgments from crowdworkers. Furthermore, we show that the collected annotations allow us to benchmark past work in coherence modeling and train a strong classifier for automatically localizing coherence errors in generated summaries. Finally, our SNaC framework can support future work in long document summarization and coherence evaluation, including improved summarization modeling and post-hoc summary correction.", + "author": "Tanya Goyal; Junyi Jessy Li; Greg Durrett", + "authorids": "/t/tanya-goyal/; /j/junyi-jessy-li/; /g/greg-durrett/", + "bibtex": "@inproceedings{goyal-etal-2022-snac,\n title = \"{SN}a{C}: Coherence Error Detection for Narrative Summarization\",\n author = \"Goyal, Tanya and\n Li, Junyi Jessy and\n Durrett, Greg\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.29/\",\n doi = \"10.18653/v1/2022.emnlp-main.29\",\n pages = \"444--463\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.29.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.29/", + "pdf_size": 4781109, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10474999073552596026&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science; Department of Linguistics; Department of Computer Science", + "aff_domain": "utexas.edu; ; ", + "email": "utexas.edu; ; ", + 
"github": "https://github.com/tagoyal/snac", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Unknown Institution;University Affiliation Not Specified", + "aff_unique_dep": "Department of Computer Science;Department of Linguistics", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.803", + "title": "SPE: Symmetrical Prompt Enhancement for Fact Probing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pretrained language models (PLMs) have been shown to accumulate factual knowledge during pretraining (Petroni et al. 2019). Recent works probe PLMs for the extent of this knowledge through prompts either in discrete or continuous forms. However, these methods do not consider symmetry of the task: object prediction and subject prediction. In this work, we propose Symmetrical Prompt Enhancement (SPE), a continuous prompt-based method for factual probing in PLMs that leverages the symmetry of the task by constructing symmetrical prompts for subject and object prediction. 
Our results on a popular factual probing dataset, LAMA, show significant improvement of SPE over previous probing methods.", + "author": "Yiyuan Li; Tong Che; Yezhen Wang; Zhengbao Jiang; Caiming Xiong; Snigdha Chaturvedi", + "authorids": "/y/yiyuan-li/; /t/tong-che/; /y/yezhen-wang/; /z/zhengbao-jiang/; /c/caiming-xiong/; /s/snigdha-chaturvedi/", + "bibtex": "@inproceedings{li-etal-2022-spe,\n title = \"{SPE}: Symmetrical Prompt Enhancement for Fact Probing\",\n author = \"Li, Yiyuan and\n Che, Tong and\n Wang, Yezhen and\n Jiang, Zhengbao and\n Xiong, Caiming and\n Chaturvedi, Snigdha\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.803/\",\n doi = \"10.18653/v1/2022.emnlp-main.803\",\n pages = \"11689--11698\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.803.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.803/", + "pdf_size": 339911, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9649300082562663660&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "UNC-Chapel Hill; NVIDIA+Mila-Quebec AI Institute; Mila-Quebec AI Institute; Carnegie Mellon University; Salesforce Research; UNC-Chapel Hill", + "aff_domain": "cs.unc.edu;nvidia.com;mila.quebec;cs.cmu.edu;salesforce.com;cs.unc.edu", + "email": "cs.unc.edu;nvidia.com;mila.quebec;cs.cmu.edu;salesforce.com;cs.unc.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1+2;2;3;4;0", + "aff_unique_norm": "University of North Carolina at Chapel Hill;NVIDIA Corporation;Mila-Quebec AI Institute;Carnegie Mellon University;Salesforce", + "aff_unique_dep": ";;AI Institute;;Salesforce Research", + 
"aff_unique_url": "https://www.unc.edu;https://www.nvidia.com;https://mila.quebec;https://www.cmu.edu;https://research.salesforce.com", + "aff_unique_abbr": "UNC;NVIDIA;Mila;CMU;Salesforce", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Chapel Hill;", + "aff_country_unique_index": "0;0+1;1;0;0;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.emnlp-main.107", + "title": "SQUIRE: A Sequence-to-sequence Framework for Multi-hop Knowledge Graph Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-hop knowledge graph (KG) reasoning has been widely studied in recent years to provide interpretable predictions on missing links with evidential paths. Most previous works use reinforcement learning (RL) based methods that learn to navigate the path towards the target entity. However, these methods suffer from slow and poor convergence, and they may fail to infer a certain path when there is a missing edge along the path. Here we present SQUIRE, the first Sequence-to-sequence based multi-hop reasoning framework, which utilizes an encoder-decoder Transformer structure to translate the query to a path. Our framework brings about two benefits: (1) It can learn and predict in an end-to-end fashion, which gives better and faster convergence; (2) Our transformer model does not rely on existing edges to generate the path, and has the flexibility to complete missing edges along the path, especially in sparse KGs. 
Experiments on standard and sparse KGs show that our approach yields significant improvement over prior methods, while converging 4x-7x faster.", + "author": "Yushi Bai; Xin Lv; Juanzi Li; Lei Hou; Yincen Qu; Zelin Dai; Feiyu Xiong", + "authorids": "/y/yushi-bai/; /x/xin-lv/; /j/juanzi-li/; /l/lei-hou/; /y/yincen-qu/; /z/zelin-dai/; /f/feiyu-xiong/", + "bibtex": "@inproceedings{bai-etal-2022-squire,\n title = \"{SQUIRE}: A Sequence-to-sequence Framework for Multi-hop Knowledge Graph Reasoning\",\n author = \"Bai, Yushi and\n Lv, Xin and\n Li, Juanzi and\n Hou, Lei and\n Qu, Yincen and\n Dai, Zelin and\n Xiong, Feiyu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.107/\",\n doi = \"10.18653/v1/2022.emnlp-main.107\",\n pages = \"1649--1662\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.107.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.107/", + "pdf_size": 496195, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6654011157998740430&as_sdt=40000005&sciodt=0,22&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing 100084, China; Department of Computer Science and Technology, BNRist + KIRC, Institute for Artificial Intelligence, Tsinghua University, Beijing 100084, China; Alibaba Group, 
Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China", + "aff_domain": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn; ; ; ; ", + "email": "mails.tsinghua.edu.cn;tsinghua.edu.cn;tsinghua.edu.cn; ; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;2;2", + "aff_unique_norm": "BNRist;Tsinghua University;Alibaba Group", + "aff_unique_dep": "Department of Computer Science and Technology;Institute for Artificial Intelligence;", + "aff_unique_url": ";https://www.tsinghua.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": ";THU;Alibaba", + "aff_campus_unique_index": "1;1;1;1;2;2;2", + "aff_campus_unique": ";Beijing;Hangzhou", + "aff_country_unique_index": "1;1;1;1;1;1;1", + "aff_country_unique": ";China" + }, + { + "id": "2022.emnlp-main.75", + "title": "SQuALITY: Building a Long-Document Summarization Dataset the Hard Way", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Summarization datasets are often assembled either by scraping naturally occurring public-domain summaries\u2014which are nearly always in difficult-to-work-with technical domains\u2014or by using approximate heuristics to extract them from everyday text\u2014which frequently yields unfaithful summaries. In this work, we turn to a slower but more straightforward approach to developing summarization benchmark data: We hire highly-qualified contractors to read stories and write original summaries from scratch. To amortize reading time, we collect five summaries per document, with the first giving an overview and the subsequent four addressing specific questions. We use this protocol to collect SQuALITY, a dataset of question-focused summaries built on the same public-domain short stories as the multiple-choice dataset QuALITY (Pang et al., 2021). 
Experiments with state-of-the-art summarization systems show that our dataset is challenging and that existing automatic evaluation metrics are weak indicators of quality.", + "author": "Alex Wang; Richard Yuanzhe Pang; Angelica Chen; Jason Phang; Samuel R. Bowman", + "authorids": "/a/alex-wang/; /r/richard-yuanzhe-pang/; /a/angelica-chen/; /j/jason-phang/; /s/samuel-bowman/", + "bibtex": "@inproceedings{wang-etal-2022-squality,\n title = \"{SQ}u{ALITY}: Building a Long-Document Summarization Dataset the Hard Way\",\n author = \"Wang, Alex and\n Pang, Richard Yuanzhe and\n Chen, Angelica and\n Phang, Jason and\n Bowman, Samuel R.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.75/\",\n doi = \"10.18653/v1/2022.emnlp-main.75\",\n pages = \"1139--1156\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.75.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.75/", + "pdf_size": 570401, + "gs_citation": 68, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4956639035891336309&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "New York University; New York University; New York University; New York University; New York University", + "aff_domain": "nyu.edu;nyu.edu; ; ;nyu.edu", + "email": "nyu.edu;nyu.edu; ; ;nyu.edu", + "github": "https://github.com/nyu-mll/SQuALITY", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "New York University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.nyu.edu", + "aff_unique_abbr": "NYU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United 
States" + }, + { + "id": "2022.findings-emnlp.89", + "title": "STAR: SQL Guided Pre-Training for Context-dependent Text-to-SQL Parsing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we propose a novel SQL guided pre-training framework STAR for context-dependent text-to-SQL parsing, which leverages contextual information to enrich natural language (NL) utterance and table schema representations for text-to-SQL conversations. Concretely, we propose two novel pre-training objectives which respectively explore the context-dependent interactions of NL utterances and SQL queries within each text-to-SQL conversation: (i) schema state tracking (SST) objective that tracks and explores the schema states of context-dependent SQL queries in the form of schema-states by predicting and updating the value of each schema slot during interaction; (ii) utterance dependency tracking (UDT) objective that employs weighted contrastive learning to pull together two semantically similar NL utterances and push away the representations of semantically dissimilar NL utterances within each conversation. In addition, we construct a high-quality large-scale context-dependent text-to-SQL conversation corpus to pre-train STAR. Extensive experiments show that STAR achieves new state-of-the-art performance on two downstream benchmarks (SParC and CoSQL), significantly outperforming previous pre-training methods and ranking first on the leaderboard. 
We believe the release of the constructed corpus, codebase and pre-trained STAR checkpoints would push forward the research in this area.", + "author": "Zefeng Cai; Xiangyu Li; Binyuan Hui; Min Yang; Bowen Li; Binhua Li; Zheng Cao; Weijie Li; Fei Huang; Luo Si; Yongbin Li", + "authorids": "/z/zefeng-cai/; /x/xiangyu-li/; /b/binyuan-hui/; /m/min-yang/; /b/bowen-li/; /b/binhua-li/; /z/zheng-cao/; /w/weijie-li/; /f/fei-huang/; /l/luo-si/; /y/yongbin-li/", + "bibtex": "@inproceedings{cai-etal-2022-star,\n title = \"{STAR}: {SQL} Guided Pre-Training for Context-dependent Text-to-{SQL} Parsing\",\n author = \"Cai, Zefeng and\n Li, Xiangyu and\n Hui, Binyuan and\n Yang, Min and\n Li, Bowen and\n Li, Binhua and\n Cao, Zheng and\n Li, Weijie and\n Huang, Fei and\n Si, Luo and\n Li, Yongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.89/\",\n doi = \"10.18653/v1/2022.findings-emnlp.89\",\n pages = \"1235--1247\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.89.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.89/", + "pdf_size": 826180, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5665452259150592084&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Science and Technology of China; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; DAMO Academy, Alibaba Group; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group", + "aff_domain": 
"siat.ac.cn;siat.ac.cn;siat.ac.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ; ; ; ; ", + "email": "siat.ac.cn;siat.ac.cn;siat.ac.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com; ; ; ; ; ", + "github": "https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/star", + "project": "", + "author_num": 11, + "aff_unique_index": "0;1;2;1;2;2;2;2;2;2;2", + "aff_unique_norm": "University of Science and Technology of China;Shenzhen Institute of Advanced Technology;Alibaba Group", + "aff_unique_dep": ";;DAMO Academy", + "aff_unique_url": "http://www.ustc.edu.cn;http://www.siat.cas.cn;https://www.alibaba-group.com", + "aff_unique_abbr": "USTC;SIAT;Alibaba", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.515", + "title": "STGN: an Implicit Regularization Method for Learning with Noisy Labels in Natural Language Processing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Noisy labels are ubiquitous in natural language processing (NLP) tasks. Existing work, namely learning with noisy labels in NLP, is often limited to dedicated tasks or specific training procedures, making it hard to be widely used. To address this issue, SGD noise has been explored to provide a more general way to alleviate the effect of noisy labels by involving benign noise in the process of stochastic gradient descent. However, previous studies exert identical perturbation for all samples, which may cause overfitting on incorrect ones or optimizing correct ones inadequately. To facilitate this, we propose a novel stochastic tailor-made gradient noise (STGN), mitigating the effect of inherent label noise by introducing tailor-made benign noise for each sample. 
Specifically, we investigate multiple principles to precisely and stably discriminate correct samples from incorrect ones and thus apply different intensities of perturbation to them. A detailed theoretical analysis shows that STGN has good properties, beneficial for model generalization. Experiments on three different NLP tasks demonstrate the effectiveness and versatility of STGN. Also, STGN can boost existing robust training methods.", + "author": "Tingting Wu; Xiao Ding; Minji Tang; Hao Zhang; Bing Qin; Ting Liu", + "authorids": "/t/tingting-wu/; /x/xiao-ding/; /m/minji-tang/; /h/hao-zhang/; /b/bing-qin/; /t/ting-liu/", + "bibtex": "@inproceedings{wu-etal-2022-stgn,\n title = \"{STGN}: an Implicit Regularization Method for Learning with Noisy Labels in Natural Language Processing\",\n author = \"Wu, Tingting and\n Ding, Xiao and\n Tang, Minji and\n Zhang, Hao and\n Qin, Bing and\n Liu, Ting\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.515/\",\n doi = \"10.18653/v1/2022.emnlp-main.515\",\n pages = \"7587--7598\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.515.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.515/", + "pdf_size": 562192, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2962282973562067255&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 0, + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Faculty 
of Computing, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, China", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "https://github.com/tangminji/STGN-sst", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.329", + "title": "STRUDEL: Structured Dialogue Summarization for Dialogue Comprehension", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstractive dialogue summarization has long been viewed as an important standalone task in natural language processing, but no previous work has explored the possibility of whether abstractive dialogue summarization can also be used as a means to boost an NLP system\u2019s performance on other important dialogue comprehension tasks. In this paper, we propose a novel type of dialogue summarization task - STRUctured DiaLoguE Summarization (STRUDEL) - that can help pre-trained language models to better understand dialogues and improve their performance on important dialogue comprehension tasks. 
In contrast to the holistic approach taken by the traditional free-form abstractive summarization task for dialogues, STRUDEL aims to decompose and imitate the hierarchical, systematic and structured mental process that we human beings usually go through when understanding and analyzing dialogues, and thus has the advantage of being more focused, specific and instructive for dialogue comprehension models to learn from. We further introduce a new STRUDEL dialogue comprehension modeling framework that integrates STRUDEL into a dialogue reasoning module over transformer encoder language models to improve their dialogue comprehension ability. In our empirical experiments on two important downstream dialogue comprehension tasks - dialogue question answering and dialogue response prediction - we demonstrate that our STRUDEL dialogue comprehension models can significantly improve the dialogue comprehension performance of transformer encoder language models.", + "author": "Borui Wang; Chengcheng Feng; Arjun Nair; Madelyn Mao; Jai Desai; Asli Celikyilmaz; Haoran Li; Yashar Mehdad; Dragomir Radev", + "authorids": "/b/borui-wang/; /c/chengcheng-feng/; /a/arjun-nair/; /m/madelyn-mao/; /j/jai-desai/; /a/asli-celikyilmaz/; /h/haoran-li/; /y/yashar-mehdad/; /d/dragomir-radev/", + "bibtex": "@inproceedings{wang-etal-2022-strudel,\n title = \"{STRUDEL}: Structured Dialogue Summarization for Dialogue Comprehension\",\n author = \"Wang, Borui and\n Feng, Chengcheng and\n Nair, Arjun and\n Mao, Madelyn and\n Desai, Jai and\n Celikyilmaz, Asli and\n Li, Haoran and\n Mehdad, Yashar and\n Radev, Dragomir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.329/\",\n doi 
= \"10.18653/v1/2022.emnlp-main.329\",\n pages = \"4949--4958\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.329.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.329/", + "pdf_size": 4144411, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10190687203066515818&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff": "Yale University; Yale University; Yale University; Yale University; Yale University; Meta AI; Meta AI; Meta AI; Yale University", + "aff_domain": "yale.edu;yale.edu; ; ; ;fb.com;fb.com;fb.com; ", + "email": "yale.edu;yale.edu; ; ; ;fb.com;fb.com;fb.com; ", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;1;1;1;0", + "aff_unique_norm": "Yale University;Meta Platforms, Inc.", + "aff_unique_dep": ";Meta AI", + "aff_unique_url": "https://www.yale.edu;https://meta.com", + "aff_unique_abbr": "Yale;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.284", + "title": "SYGMA: A System for Generalizable and Modular Question Answering Over Knowledge Bases", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge Base Question Answering (KBQA) involving complex reasoning is emerging as an important research direction. However, most KBQA systems struggle with generalizability, particularly on two dimensions: (a) across multiple knowledge bases, where existing KBQA approaches are typically tuned to a single knowledge base, and (b) across multiple reasoning types, where majority of datasets and systems have primarily focused on multi-hop reasoning. In this paper, we present SYGMA, a modular KBQA approach developed with goal of generalization across multiple knowledge bases and multiple reasoning types. 
To facilitate this, SYGMA is designed as two high level modules: 1) KB-agnostic question understanding module that remain common across KBs, and generates logic representation of the question with high level reasoning constructs that are extensible, and 2) KB-specific question mapping and answering module to address the KB-specific aspects of the answer extraction. We evaluated SYGMA on multiple datasets belonging to distinct knowledge bases (DBpedia and Wikidata) and distinct reasoning types (multi-hop and temporal). State-of-the-art or competitive performances achieved on those datasets demonstrate its generalization capability.", + "author": "Sumit Neelam; Udit Sharma; Hima Karanam; Shajith Ikbal; Pavan Kapanipathi; Ibrahim Abdelaziz; Nandana Mihindukulasooriya; Young-Suk Lee; Santosh Srivastava; Cezar Pendus; Saswati Dana; Dinesh Garg; Achille Fokoue; G P Shrivatsa Bhargav; Dinesh Khandelwal; Srinivas Ravishankar; Sairam Gurajada; Maria Chang; Rosario Uceda-Sosa; Salim Roukos; Alexander Gray; Guilherme Lima; Ryan Riegel; Francois Luus; L V Subramaniam", + "authorids": "/s/sumit-neelam/; /u/udit-sharma/; /h/hima-karanam/; /s/shajith-ikbal/; /p/pavan-kapanipathi/; /i/ibrahim-abdelaziz/; /n/nandana-mihindukulasooriya/; /y/young-suk-lee/; /s/santosh-srivastava/; /c/cezar-pendus/; /s/saswati-dana/; /d/dinesh-garg/; /a/achille-fokoue-nkoutche/; /g/g-p-shrivatsa-bhargav/; /d/dinesh-khandelwal/; /s/srinivas-ravishankar/; /s/sairam-gurajada/; /m/maria-chang/; /r/rosario-uceda-sosa/; /s/salim-roukos/; /a/alexander-gray/; /g/guilherme-lima/; /r/ryan-riegel/; /f/francois-luus/; /l/l-v-subramaniam/", + "bibtex": "@inproceedings{neelam-etal-2022-sygma,\n title = \"{SYGMA}: A System for Generalizable and Modular Question Answering Over Knowledge Bases\",\n author = \"Neelam, Sumit and\n Sharma, Udit and\n Karanam, Hima and\n Ikbal, Shajith and\n Kapanipathi, Pavan and\n Abdelaziz, Ibrahim and\n Mihindukulasooriya, Nandana and\n Lee, Young-Suk and\n Srivastava, Santosh and\n 
Pendus, Cezar and\n Dana, Saswati and\n Garg, Dinesh and\n Fokoue, Achille and\n Bhargav, G P Shrivatsa and\n Khandelwal, Dinesh and\n Ravishankar, Srinivas and\n Gurajada, Sairam and\n Chang, Maria and\n Uceda-Sosa, Rosario and\n Roukos, Salim and\n Gray, Alexander and\n Lima, Guilherme and\n Riegel, Ryan and\n Luus, Francois and\n Subramaniam, L V\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.284/\",\n doi = \"10.18653/v1/2022.findings-emnlp.284\",\n pages = \"3866--3879\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.284.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.284/", + "pdf_size": 468054, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8258930349720859918&as_sdt=80005&sciodt=0,11&hl=en", + "gs_version_total": 9, + "aff": ";;;;;;;;;;;;;;;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;;;;;;;;", + "github": "", + "project": "", + "author_num": 25 + }, + { + "id": "2022.emnlp-main.154", + "title": "SafeText: A Benchmark for Exploring Physical Safety in Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Understanding what constitutes safe text is an important issue in natural language processing and can often prevent the deployment of models deemed harmful and unsafe. One such type of safety that has been scarcely studied is commonsense physical safety, i.e. text that is not explicitly violent and requires additional commonsense knowledge to comprehend that it leads to physical harm. 
We create the first benchmark dataset, SafeText, comprising real-life scenarios with paired safe and physically unsafe pieces of advice. We utilize SafeText to empirically study commonsense physical safety across various models designed for text generation and commonsense reasoning tasks. We find that state-of-the-art large language models are susceptible to the generation of unsafe text and have difficulty rejecting unsafe advice. As a result, we argue for further studies of safety and the assessment of commonsense physical safety in models before release.", + "author": "Sharon Levy; Emily Allaway; Melanie Subbiah; Lydia Chilton; Desmond Patton; Kathleen McKeown; William Yang Wang", + "authorids": "/s/sharon-levy/; /e/emily-allaway/; /m/melanie-subbiah/; /l/lydia-chilton/; /d/desmond-patton/; /k/kathleen-mckeown/; /w/william-yang-wang/", + "bibtex": "@inproceedings{levy-etal-2022-safetext,\n title = \"{S}afe{T}ext: A Benchmark for Exploring Physical Safety in Language Models\",\n author = \"Levy, Sharon and\n Allaway, Emily and\n Subbiah, Melanie and\n Chilton, Lydia and\n Patton, Desmond and\n McKeown, Kathleen and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.154/\",\n doi = \"10.18653/v1/2022.emnlp-main.154\",\n pages = \"2407--2421\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.154.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.154/", + "pdf_size": 2289477, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2196653696402420376&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "University of California, Santa Barbara; Columbia University; 
Columbia University; Columbia University; University of Pennsylvania; Columbia University; University of California, Santa Barbara", + "aff_domain": "cs.ucsb.edu;cs.columbia.edu;cs.columbia.edu;cs.columbia.edu;upenn.edu;cs.columbia.edu;cs.ucsb.edu", + "email": "cs.ucsb.edu;cs.columbia.edu;cs.columbia.edu;cs.columbia.edu;upenn.edu;cs.columbia.edu;cs.ucsb.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;2;1;0", + "aff_unique_norm": "University of California, Santa Barbara;Columbia University;University of Pennsylvania", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.ucsb.edu;https://www.columbia.edu;https://www.upenn.edu", + "aff_unique_abbr": "UCSB;Columbia;UPenn", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Santa Barbara;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.409", + "title": "Salience Allocation as Guidance for Abstractive Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstractive summarization models typically learn to capture the salient information from scratch implicitly.Recent literature adds extractive summaries as guidance for abstractive summarization models to provide hints of salient content and achieves better performance.However, extractive summaries as guidance could be over strict, leading to information loss or noisy signals.Furthermore, it cannot easily adapt to documents with various abstractiveness.As the number and allocation of salience content pieces varies, it is hard to find a fixed threshold deciding which content should be included in the guidance.In this paper, we propose a novel summarization approach with a flexible and reliable salience guidance, namely SEASON (SaliencE Allocation as Guidance for Abstractive SummarizatiON).SEASON utilizes the allocation of salience expectation to guide abstractive summarization and adapts well to articles in 
different abstractiveness.Automatic and human evaluations on two benchmark datasets show that the proposed method is effective and reliable.Empirical results on more than one million news articles demonstrate a natural fifteen-fifty salience split for news article sentences, providing a useful insight for composing news articles.", + "author": "Fei Wang; Kaiqiang Song; Hongming Zhang; Lifeng Jin; Sangwoo Cho; Wenlin Yao; Xiaoyang Wang; Muhao Chen; Dong Yu", + "authorids": "/f/fei-wang/; /k/kaiqiang-song/; /h/hongming-zhang/; /l/lifeng-jin/; /s/sangwoo-cho/; /w/wenlin-yao/; /x/xiaoyang-wang/; /m/muhao-chen/; /d/dong-yu/", + "bibtex": "@inproceedings{wang-etal-2022-salience,\n title = \"Salience Allocation as Guidance for Abstractive Summarization\",\n author = \"Wang, Fei and\n Song, Kaiqiang and\n Zhang, Hongming and\n Jin, Lifeng and\n Cho, Sangwoo and\n Yao, Wenlin and\n Wang, Xiaoyang and\n Chen, Muhao and\n Yu, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.409/\",\n doi = \"10.18653/v1/2022.emnlp-main.409\",\n pages = \"6094--6106\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.409.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.409/", + "pdf_size": 384829, + "gs_citation": 34, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11130696208568291970&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of Southern California+Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; Tencent AI Lab, Seattle; University of Southern California+Tencent AI Lab, Seattle; Tencent AI Lab, Seattle", + 
"aff_domain": "usc.edu;usc.edu;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com", + "email": "usc.edu;usc.edu;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com", + "github": "https://github.com/tencent-ailab/season", + "project": "", + "author_num": 9, + "aff_unique_index": "0+1;1;1;1;1;1;1;0+1;1", + "aff_unique_norm": "University of Southern California;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": "https://www.usc.edu;https://ai.tencent.com", + "aff_unique_abbr": "USC;Tencent AI Lab", + "aff_campus_unique_index": "0+1;1;1;1;1;1;1;0+1;1", + "aff_campus_unique": "Los Angeles;Seattle", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.19", + "title": "Salient Phrase Aware Dense Retrieval: Can a Dense Retriever Imitate a Sparse One?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Despite their recent popularity and well-known advantages, dense retrievers still lag behind sparse methods such as BM25 in their ability to reliably match salient phrases and rare entities in the query and to generalize to out-of-domain data. It has been argued that this is an inherent limitation of dense models. We rebut this claim by introducing the Salient Phrase Aware Retriever (SPAR), a dense retriever with the lexical matching capacity of a sparse model. We show that a dense Lexical Model \u039b can be trained to imitate a sparse one, and SPAR is built by augmenting a standard dense retriever with \u039b. 
Empirically, SPAR shows superior performance on a range of tasks including five question answering datasets, MS MARCO passage retrieval, as well as the EntityQuestions and BEIR benchmarks for out-of-domain evaluation, exceeding the performance of state-of-the-art dense and sparse retrievers. The code and models of SPAR are available at: https://github.com/facebookresearch/dpr-scale/tree/main/spar", + "author": "Xilun Chen; Kushal Lakhotia; Barlas Oguz; Anchit Gupta; Patrick Lewis; Stan Peshterliev; Yashar Mehdad; Sonal Gupta; Wen-tau Yih", + "authorids": "/x/xilun-chen/; /k/kushal-lakhotia/; /b/barlas-oguz/; /a/anchit-gupta/; /p/patrick-lewis/; /s/stan-peshterliev/; /y/yashar-mehdad/; /s/sonal-gupta/; /w/wen-tau-yih/", + "bibtex": "@inproceedings{chen-etal-2022-salient,\n title = \"Salient Phrase Aware Dense Retrieval: Can a Dense Retriever Imitate a Sparse One?\",\n author = \"Chen, Xilun and\n Lakhotia, Kushal and\n Oguz, Barlas and\n Gupta, Anchit and\n Lewis, Patrick and\n Peshterliev, Stan and\n Mehdad, Yashar and\n Gupta, Sonal and\n Yih, Wen-tau\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.19/\",\n doi = \"10.18653/v1/2022.findings-emnlp.19\",\n pages = \"250--262\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.19.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.19/", + "pdf_size": 1516538, + "gs_citation": 71, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7338555420554305904&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": 
"meta.com;gmail.com;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "email": "meta.com;gmail.com;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com;meta.com", + "github": "https://github.com/facebookresearch/dpr-scale/tree/main/spar", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.754", + "title": "Sampling-Based Approximations to Minimum Bayes Risk Decoding for Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In NMT we search for the mode of the model distribution to form predictions. The mode and other high-probability translations found by beam search have been shown to often be inadequate in a number of ways. This prevents improving translation quality through better search, as these idiosyncratic translations end up selected by the decoding algorithm, a problem known as the beam search curse. Recently, an approximation to minimum Bayes risk (MBR) decoding has been proposed as an alternative decision rule that would likely not suffer from the same problems. We analyse this approximation and establish that it has no equivalent to the beam search curse. We then design approximations that decouple the cost of exploration from the cost of robust estimation of expected utility. This allows for much larger hypothesis spaces, which we show to be beneficial. We also show that mode-seeking strategies can aid in constructing compact sets of promising hypotheses and that MBR is effective in identifying good translations in them. 
We conduct experiments on three language pairs varying in amounts of resources available: English into and from German, Romanian, and Nepali.", + "author": "Bryan Eikema; Wilker Aziz", + "authorids": "/b/bryan-eikema/; /w/wilker-aziz/", + "bibtex": "@inproceedings{eikema-aziz-2022-sampling,\n title = \"Sampling-Based Approximations to Minimum {B}ayes Risk Decoding for Neural Machine Translation\",\n author = \"Eikema, Bryan and\n Aziz, Wilker\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.754/\",\n doi = \"10.18653/v1/2022.emnlp-main.754\",\n pages = \"10978--10993\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.754.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.754/", + "pdf_size": 1689409, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13026770156206249123&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 5, + "aff": "University of Amsterdam; University of Amsterdam", + "aff_domain": "uva.nl;uva.nl", + "email": "uva.nl;uva.nl", + "github": "github.com/roxot/mbr-nmt", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Amsterdam", + "aff_unique_dep": "", + "aff_unique_url": "https://www.uva.nl", + "aff_unique_abbr": "UvA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.findings-emnlp.387", + "title": "Sarcasm Detection is Way Too Easy! 
An Empirical Comparison of Human and Machine Sarcasm Detection", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, author-annotated sarcasm datasets, which focus on intended, rather than perceived sarcasm, have been introduced. Although datasets collected using first-party annotation have important benefits, there is no comparison of human and machine performance on these new datasets. In this paper, we collect new annotations to provide human-level benchmarks for these first-party annotated sarcasm tasks in both English and Arabic, and compare the performance of human annotators to that of state-of-the-art sarcasm detection systems. Our analysis confirms that sarcasm detection is extremely challenging, with individual humans performing close to or slightly worse than the best trained models. With majority voting, however, humans are able to achieve the best results on all tasks. We also perform error analysis, finding that some of the most challenging examples are those that require additional context. We also highlight common features and patterns used to express sarcasm in English and Arabic such as idioms and proverbs. We suggest that to better capture sarcasm, future sarcasm detection datasets and models should focus on representing conversational and cultural context while leveraging world knowledge and common sense.", + "author": "Ibrahim Abu Farha; Steven Wilson; Silviu Oprea; Walid Magdy", + "authorids": "/i/ibrahim-abu-farha/; /s/steven-wilson/; /s/silviu-oprea/; /w/walid-magdy/", + "bibtex": "@inproceedings{abu-farha-etal-2022-sarcasm,\n title = \"Sarcasm Detection is Way Too Easy! 
An Empirical Comparison of Human and Machine Sarcasm Detection\",\n author = \"Abu Farha, Ibrahim and\n Wilson, Steven and\n Oprea, Silviu and\n Magdy, Walid\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.387/\",\n doi = \"10.18653/v1/2022.findings-emnlp.387\",\n pages = \"5284--5295\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.387.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.387/", + "pdf_size": 913742, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8022234229586720041&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 2, + "aff": "School of Informatics, The University of Edinburgh, Edinburgh, UK; Oakland University, Rochester, MI, USA; School of Informatics, The University of Edinburgh, Edinburgh, UK; The Alan Turing Institute, London, UK", + "aff_domain": "ed.ac.uk;oakland.edu;ed.ac.uk;ed.ac.uk", + "email": "ed.ac.uk;oakland.edu;ed.ac.uk;ed.ac.uk", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "The University of Edinburgh;Oakland University;The Alan Turing Institute", + "aff_unique_dep": "School of Informatics;;", + "aff_unique_url": "https://www.ed.ac.uk;https://www.oakland.edu;https://www.turing.ac.uk", + "aff_unique_abbr": "Edinburgh;OU;ATI", + "aff_campus_unique_index": "0;1;0;2", + "aff_campus_unique": "Edinburgh;Rochester;London", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United Kingdom;United States" + }, + { + "id": "2022.emnlp-main.701", + "title": "Saving Dense Retriever from Shortcut Dependency in Conversational Search", + "track": "main", + "status": "Main", + "award": false, + 
"abstract": "Conversational search (CS) needs a holistic understanding of conversational inputs to retrieve relevant passages. In this paper, we demonstrate the existence of a retrieval shortcut in CS, which causes models to retrieve passages solely relying on partial history while disregarding the latest question. With in-depth analysis, we first show that naively trained dense retrievers heavily exploit the shortcut and hence perform poorly when asked to answer history-independent questions. To build more robust models against shortcut dependency, we explore various hard negative mining strategies. Experimental results show that training with the model-based hard negatives effectively mitigates the dependency on the shortcut, significantly improving dense retrievers on recent CS benchmarks. In particular, our retriever outperforms the previous state-of-the-art model by 11.0 in Recall@10 on QReCC.", + "author": "Sungdong Kim; Gangwoo Kim", + "authorids": "/s/sungdong-kim/; /g/gangwoo-kim/", + "bibtex": "@inproceedings{kim-kim-2022-saving,\n title = \"Saving Dense Retriever from Shortcut Dependency in Conversational Search\",\n author = \"Kim, Sungdong and\n Kim, Gangwoo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.701/\",\n doi = \"10.18653/v1/2022.emnlp-main.701\",\n pages = \"10278--10287\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.701.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.701/", + "pdf_size": 652385, + "gs_citation": 32, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1403236977121874064&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "NAVER AI Lab + KAIST AI; Korea 
University", + "aff_domain": "navercorp.com;korea.ac.kr", + "email": "navercorp.com;korea.ac.kr", + "github": "github.com/naver-ai/cs-shortcut", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "NAVER Corporation;Korea Advanced Institute of Science and Technology;Korea University", + "aff_unique_dep": "AI Lab;KAIST AI;", + "aff_unique_url": "https://www.naver.com;https://www.kaist.edu;https://www.korea.ac.kr", + "aff_unique_abbr": "NAVER;KAIST;KU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.544", + "title": "Scaling Laws Under the Microscope: Predicting Transformer Performance from Small Scale Experiments", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural scaling laws define a predictable relationship between a model\u2019s parameter count and its performance after training in the form of a power law. However, most research to date has not explicitly investigated whether scaling laws can be used to accelerate model development. In this work, we perform such an empirical investigation across a wide range of language understanding tasks, starting from models with as few as 10K parameters, and evaluate downstream performance across 9 language understanding tasks.We find that scaling laws emerge at finetuning time in some NLP tasks, and that they can also be exploited for debugging convergence when training large models. Moreover, for tasks where scaling laws exist, they can be used to predict the performance of larger models, which enables effective model selection. 
However, revealing scaling lawsrequires careful hyperparameter tuning and multiple runs for the purpose of uncertainty estimation, which incurs additional overhead, partially offsetting the computational benefits.", + "author": "Maor Ivgi; Yair Carmon; Jonathan Berant", + "authorids": "/m/maor-ivgi/; /y/yair-carmon/; /j/jonathan-berant/", + "bibtex": "@inproceedings{ivgi-etal-2022-scaling,\n title = \"Scaling Laws Under the Microscope: Predicting Transformer Performance from Small Scale Experiments\",\n author = \"Ivgi, Maor and\n Carmon, Yair and\n Berant, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.544/\",\n doi = \"10.18653/v1/2022.findings-emnlp.544\",\n pages = \"7354--7371\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.544.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.544/", + "pdf_size": 650449, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12897523673944617465&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "The Blavatnik School of Computer Science, Tel-Aviv University; The Blavatnik School of Computer Science, Tel-Aviv University; The Blavatnik School of Computer Science, Tel-Aviv University", + "aff_domain": "cs.tau.ac.il;tauex.tau.ac.il;cs.tau.ac.il", + "email": "cs.tau.ac.il;tauex.tau.ac.il;cs.tau.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Tel-Aviv University", + "aff_unique_dep": "Blavatnik School of Computer Science", + "aff_unique_url": "https://www.tau.ac.il", + "aff_unique_abbr": "TAU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Tel-Aviv", + 
"aff_country_unique_index": "0;0;0", + "aff_country_unique": "Israel" + }, + { + "id": "2022.findings-emnlp.347", + "title": "SciFact-Open: Towards open-domain scientific claim verification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While research on scientific claim verification has led to the development of powerful systems that appear to approach human performance, these approaches have yet to be tested in a realistic setting against large corpora of scientific literature. Moving to this open-domain evaluation setting, however, poses unique challenges; in particular, it is infeasible to exhaustively annotate all evidence documents. In this work, we present SciFact-Open, a new test collection designed to evaluate the performance of scientific claim verification systems on a corpus of 500K research abstracts. Drawing upon pooling techniques from information retrieval, we collect evidence for scientific claims by pooling and annotating the top predictions of four state-of-the-art scientific claim verification models. We find that systems developed on smaller corpora struggle to generalize to SciFact-Open, exhibiting performance drops of at least 15 F1. In addition, analysis of the evidence in SciFact-Open reveals interesting phenomena likely to appear when claim verification systems are deployed in practice, e.g., cases where the evidence supports only a special case of the claim. 
Our dataset is available at https://github.com/dwadden/scifact-open.", + "author": "David Wadden; Kyle Lo; Bailey Kuehl; Arman Cohan; Iz Beltagy; Lucy Lu Wang; Hannaneh Hajishirzi", + "authorids": "/d/david-wadden/; /k/kyle-lo/; /b/bailey-kuehl/; /a/arman-cohan/; /i/iz-beltagy/; /l/lucy-lu-wang/; /h/hannaneh-hajishirzi/", + "bibtex": "@inproceedings{wadden-etal-2022-scifact,\n title = \"{S}ci{F}act-Open: Towards open-domain scientific claim verification\",\n author = \"Wadden, David and\n Lo, Kyle and\n Kuehl, Bailey and\n Cohan, Arman and\n Beltagy, Iz and\n Wang, Lucy Lu and\n Hajishirzi, Hannaneh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.347/\",\n doi = \"10.18653/v1/2022.findings-emnlp.347\",\n pages = \"4719--4734\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.347.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.347/", + "pdf_size": 641700, + "gs_citation": 64, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2212935191531147534&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of Washington, Seattle, WA, USA+Allen Institute for Artificial Intelligence, Seattle, WA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; Allen Institute for Artificial Intelligence, Seattle, WA, USA; University of Washington, Seattle, WA, USA+Allen Institute for Artificial Intelligence, Seattle, WA, USA; University of Washington, Seattle, WA, USA+Allen Institute for Artificial Intelligence, Seattle, WA, USA", + "aff_domain": 
"cs.washington.edu;allenai.org;allenai.org;allenai.org;allenai.org;uw.edu;cs.washington.edu", + "email": "cs.washington.edu;allenai.org;allenai.org;allenai.org;allenai.org;uw.edu;cs.washington.edu", + "github": "https://github.com/dwadden/scifact-open", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;1;1;1;0+1;0+1", + "aff_unique_norm": "University of Washington;Allen Institute for Artificial Intelligence", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.washington.edu;https://allenai.org", + "aff_unique_abbr": "UW;AI2", + "aff_campus_unique_index": "0+0;0;0;0;0;0+0;0+0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0+0;0;0;0;0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.775", + "title": "ScienceWorld: Is your Agent Smarter than a 5th Grader?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present ScienceWorld, a benchmark to test agents\u2019 scientific reasoning abilities in a new interactive text environment at the level of a standard elementary school science curriculum. Despite the transformer-based progress seen in question-answering and scientific text processing, we find that current models cannot reason about or explain learned science concepts in novel contexts. For instance, models can easily answer what the conductivity of a known material is but struggle when asked how they would conduct an experiment in a grounded environment to find the conductivity of an unknown material. This begs the question of whether current models are simply retrieving answers by way of seeing a large number of similar examples or if they have learned to reason about concepts in a reusable manner. We hypothesize that agents need to be grounded in interactive environments to achieve such reasoning capabilities. 
Our experiments provide empirical evidence supporting this hypothesis \u2013 showing that a 1.5 million parameter agent trained interactively for 100k steps outperforms a 11 billion parameter model statically trained for scientific question-answering and reasoning from millions of expert demonstrations.", + "author": "Ruoyao Wang; Peter Jansen; Marc-Alexandre C\u00f4t\u00e9; Prithviraj Ammanabrolu", + "authorids": "/r/ruoyao-wang/; /p/peter-jansen/; /m/marc-alexandre-cote/; /p/prithviraj-ammanabrolu/", + "bibtex": "@inproceedings{wang-etal-2022-scienceworld,\n title = \"{S}cience{W}orld: Is your Agent Smarter than a 5th Grader?\",\n author = \"Wang, Ruoyao and\n Jansen, Peter and\n C{\\^o}t{\\'e}, Marc-Alexandre and\n Ammanabrolu, Prithviraj\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.775/\",\n doi = \"10.18653/v1/2022.emnlp-main.775\",\n pages = \"11279--11298\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.775.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.775/", + "pdf_size": 793262, + "gs_citation": 126, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6498940433401253783&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Arizona, Tucson, AZ; University of Arizona, Tucson, AZ; Microsoft Research Montr\u00e9al; Allen Institute for AI, Seattle, WA", + "aff_domain": "arizona.edu;arizona.edu;microsoft.com;allenai.org", + "email": "arizona.edu;arizona.edu;microsoft.com;allenai.org", + "github": "https://github.com/allenai/ScienceWorld", + "project": "https://sciworld.apps.allenai.org", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "University of 
Arizona;Microsoft Research;Allen Institute for AI", + "aff_unique_dep": ";Microsoft Research;", + "aff_unique_url": "https://www.arizona.edu;https://www.microsoft.com/en-us/research/group/microsoft-research-montreal;https://allenai.org", + "aff_unique_abbr": "UA;MSR Montreal;AI2", + "aff_campus_unique_index": "0;0;1;2", + "aff_campus_unique": "Tucson;Montr\u00e9al;Seattle", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.emnlp-main.270", + "title": "Scientific Paper Extractive Summarization Enhanced by Citation Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In a citation graph, adjacent paper nodes share related scientific terms and topics. The graph thus conveys unique structure information of document-level relatedness that can be utilized in the paper summarization task, for exploring beyond the intra-document information.In this work, we focus on leveraging citation graphs to improve scientific paper extractive summarization under different settings.We first propose a Multi-granularity Unsupervised Summarization model (MUS) as a simple and low-cost solution to the task.MUS finetunes a pre-trained encoder model on the citation graph by link prediction tasks.Then, the abstract sentences are extracted from the corresponding paper considering multi-granularity information.Preliminary results demonstrate that citation graph is helpful even in a simple unsupervised framework.Motivated by this, we next propose a Graph-based Supervised Summarizationmodel (GSS) to achieve more accurate results on the task when large-scale labeled data are available.Apart from employing the link prediction as an auxiliary task, GSS introduces a gated sentence encoder and a graph information fusion module to take advantage of the graph information to polish the sentence representation.Experiments on a public benchmark dataset show that MUS and GSS bring substantial improvements over the prior 
state-of-the-art model.", + "author": "Xiuying Chen; Mingzhe Li; Shen Gao; Rui Yan; Xin Gao; Xiangliang Zhang", + "authorids": "/x/xiuying-chen/; /m/mingzhe-li/; /s/shen-gao/; /r/rui-yan/; /x/xin-gao/; /x/xiangliang-zhang/", + "bibtex": "@inproceedings{chen-etal-2022-scientific,\n title = \"Scientific Paper Extractive Summarization Enhanced by Citation Graphs\",\n author = \"Chen, Xiuying and\n Li, Mingzhe and\n Gao, Shen and\n Yan, Rui and\n Gao, Xin and\n Zhang, Xiangliang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.270/\",\n doi = \"10.18653/v1/2022.emnlp-main.270\",\n pages = \"4053--4062\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.270.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.270/", + "pdf_size": 548807, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=912627219472599075&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Computational Bioscience Reseach Center, KAUST; Ant Group; School of Computer Science and Technology, Shandong University; Gaoling School of Artificial Intelligence, Renmin University of China; Computational Bioscience Reseach Center, KAUST + University of Notre Dame; University of Notre Dame + Computational Bioscience Reseach Center, KAUST", + "aff_domain": "kaust.edu.sa;antgroup.com; ; ; ; ", + "email": "kaust.edu.sa;antgroup.com; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;0+4;4+0", + "aff_unique_norm": "King Abdullah University of Science and Technology;Ant Group;Shandong University;Renmin University of China;University of Notre Dame", + "aff_unique_dep": "Computational Bioscience 
Research Center;;School of Computer Science and Technology;Gaoling School of Artificial Intelligence;", + "aff_unique_url": "https://www.kaust.edu.sa;https://www.antgroup.com;http://www.sdu.edu.cn;http://www.ruc.edu.cn;https://www.nd.edu", + "aff_unique_abbr": "KAUST;Ant Group;;RUC;Notre Dame", + "aff_campus_unique_index": "1;;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;1;1;1;0+2;2+0", + "aff_country_unique": "Saudi Arabia;China;United States" + }, + { + "id": "2022.findings-emnlp.153", + "title": "Scientific and Creative Analogies in Pretrained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper examines the encoding of analogy in large-scale pretrained language models, such as BERT and GPT-2. Existing analogy datasets typically focus on a limited set of analogical relations, with a high similarity of the two domains between which the analogy holds. As a more realistic setup, we introduce the Scientific and Creative Analogy dataset (SCAN), a novel analogy dataset containing systematic mappings of multiple attributes and relational structures across dissimilar domains. Using this dataset, we test the analogical reasoning capabilities of several widely-used pretrained language models (LMs). 
We find that state-of-the-art LMs achieve low performance on these complex analogy tasks, highlighting the challenges still posed by analogy understanding.", + "author": "Tamara Czinczoll; Helen Yannakoudakis; Pushkar Mishra; Ekaterina Shutova", + "authorids": "/t/tamara-czinczoll/; /h/helen-yannakoudakis/; /p/pushkar-mishra/; /e/ekaterina-shutova/", + "bibtex": "@inproceedings{czinczoll-etal-2022-scientific,\n title = \"Scientific and Creative Analogies in Pretrained Language Models\",\n author = \"Czinczoll, Tamara and\n Yannakoudakis, Helen and\n Mishra, Pushkar and\n Shutova, Ekaterina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.153/\",\n doi = \"10.18653/v1/2022.findings-emnlp.153\",\n pages = \"2094--2100\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.153.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.153/", + "pdf_size": 169370, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18360510207073500213&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "ILLC, University of Amsterdam, the Netherlands + Hasso Plattner Institute/University of Potsdam, Germany; Dept. of Informatics, King\u2019s College London, United Kingdom; Meta AI, London, United Kingdom; ILLC, University of Amsterdam, the Netherlands", + "aff_domain": "hpi.de;kcl.ac.uk;meta.com;uva.nl", + "email": "hpi.de;kcl.ac.uk;meta.com;uva.nl", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;3;0", + "aff_unique_norm": "University of Amsterdam;Hasso Plattner Institute;King\u2019s College London;Meta AI", + "aff_unique_dep": "ILLC;;Dept. 
of Informatics;", + "aff_unique_url": "https://www.uva.nl;https://www.hpi.de;https://www.kcl.ac.uk;", + "aff_unique_abbr": "UvA;HPI;KCL;", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Amsterdam;;London", + "aff_country_unique_index": "0+1;2;2;0", + "aff_country_unique": "Netherlands;Germany;United Kingdom" + }, + { + "id": "2022.findings-emnlp.458", + "title": "Search to Pass Messages for Temporal Knowledge Graph Completion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Completing missing facts is a fundamental task for temporal knowledge graphs (TKGs).Recently, graph neural network (GNN) based methods, which can simultaneously explore topological and temporal information, have become the state-of-the-art (SOTA) to complete TKGs. However, these studies are based on hand-designed architectures and fail to explore the diverse topological and temporal properties of TKG.To address this issue, we propose to use neural architecture search (NAS) to design data-specific message passing architecture for TKG completion.In particular, we develop a generalized framework to explore topological and temporal information in TKGs.Based on this framework, we design an expressive search space to fully capture various properties of different TKGs. Meanwhile, we adopt a search algorithm, which trains a supernet structure by sampling single path for efficient search with less cost.We further conduct extensive experiments on three benchmark datasets. 
The results show that the searched architectures by our method achieve the SOTA performances.Besides, the searched models can also implicitly reveal diverse properties in different TKGs.Our code is released in https://github.com/striderdu/SPA.", + "author": "Zhen Wang; Haotong Du; Quanming Yao; Xuelong Li", + "authorids": "/z/zhen-wang/; /h/haotong-du/; /q/quanming-yao/; /x/xuelong-li/", + "bibtex": "@inproceedings{wang-etal-2022-search,\n title = \"Search to Pass Messages for Temporal Knowledge Graph Completion\",\n author = \"Wang, Zhen and\n Du, Haotong and\n Yao, Quanming and\n Li, Xuelong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.458/\",\n doi = \"10.18653/v1/2022.findings-emnlp.458\",\n pages = \"6160--6172\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.458.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.458/", + "pdf_size": 1046893, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16600805426064962966&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science, Northwestern Polytechnical University, China+School of Arti\ufb01cial Intelligence, Optics and Electronics (iOPEN), Northwestern Polytechnical University, China; School of Computer Science, Northwestern Polytechnical University, China+School of Arti\ufb01cial Intelligence, Optics and Electronics (iOPEN), Northwestern Polytechnical University, China; Department of Electronic Engineering, Tsinghua University, China; School of Arti\ufb01cial Intelligence, Optics and Electronics (iOPEN), Northwestern Polytechnical University, China", + "aff_domain": 
"nwpu.edu.cn;mail.nwpu.edu.cn;tsinghua.edu.cn;nwpu.edu.cn", + "email": "nwpu.edu.cn;mail.nwpu.edu.cn;tsinghua.edu.cn;nwpu.edu.cn", + "github": "https://github.com/striderdu/SPA", + "project": "", + "author_num": 4, + "aff_unique_index": "0+0;0+0;1;0", + "aff_unique_norm": "Northwestern Polytechnical University;Tsinghua University", + "aff_unique_dep": "School of Computer Science;Department of Electronic Engineering", + "aff_unique_url": "https://www.nwpu.edu.cn;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "NPU;Tsinghua", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.115", + "title": "Seeded Hierarchical Clustering for Expert-Crafted Taxonomies", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Practitioners from many disciplines (e.g., political science) use expert-crafted taxonomies to make sense of large, unlabeled corpora. In this work, we study Seeded Hierarchical Clustering (SHC): the task of automatically fitting unlabeled data to such taxonomies using a small set of labeled examples. We propose HierSeed, a novel weakly supervised algorithm for this task that uses only a small set of labeled seed examples in a computation and data efficient manner. HierSeed assigns documents to topics by weighing document density against topic hierarchical structure. 
It outperforms unsupervised and supervised baselines for the SHC task on three real-world datasets.", + "author": "Anish Saha; Amith Ananthram; Emily Allaway; Heng Ji; Kathleen McKeown", + "authorids": "/a/anish-saha/; /a/amith-ananthram/; /e/emily-allaway/; /h/heng-ji/; /k/kathleen-mckeown/", + "bibtex": "@inproceedings{saha-etal-2022-seeded,\n title = \"Seeded Hierarchical Clustering for Expert-Crafted Taxonomies\",\n author = \"Saha, Anish and\n Ananthram, Amith and\n Allaway, Emily and\n Ji, Heng and\n McKeown, Kathleen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.115/\",\n doi = \"10.18653/v1/2022.findings-emnlp.115\",\n pages = \"1595--1609\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.115.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.115/", + "pdf_size": 3228632, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16957273066895732771&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "Columbia University; Columbia University; Columbia University; University of Illinois Urbana-Champaign; Columbia University", + "aff_domain": "columbia.edu;columbia.edu;cs.columbia.edu;illinois.edu;cs.columbia.edu", + "email": "columbia.edu;columbia.edu;cs.columbia.edu;illinois.edu;cs.columbia.edu", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Columbia University;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.columbia.edu;https://illinois.edu", + "aff_unique_abbr": "Columbia;UIUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Urbana-Champaign", + 
"aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.44", + "title": "Segmenting Numerical Substitution Ciphers", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Deciphering historical substitution ciphers is a challenging problem. Example problems that have been previously studied include detecting cipher type, detecting plaintext language, and acquiring the substitution key for segmented ciphers. However, attacking unsegmented ciphers is still a challenging task. Segmentation (i.e. finding substitution units) is essential for cracking those ciphers. In this work, we propose the first automatic methods to segment those ciphers using Byte Pair Encoding (BPE) and unigram language models. Our methods achieve an average segmentation error of 2% on 100 randomly-generated monoalphabetic ciphers and 27% on 3 real historical homophonic ciphers. We also propose a method for solving non-deterministic ciphers with existing keys using a lattice and a pretrained language model. 
Our method leads to the full solution of the IA cipher; a real historical cipher that has not been fully solved until this work.", + "author": "Nada Aldarrab; Jonathan May", + "authorids": "/n/nada-aldarrab/; /j/jonathan-may/", + "bibtex": "@inproceedings{aldarrab-may-2022-segmenting,\n title = \"Segmenting Numerical Substitution Ciphers\",\n author = \"Aldarrab, Nada and\n May, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.44/\",\n doi = \"10.18653/v1/2022.emnlp-main.44\",\n pages = \"706--714\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.44.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.44/", + "pdf_size": 1667859, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9080146717656992507&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Information Technology, King Abdulaziz University; Information Sciences Institute, University of Southern California", + "aff_domain": "kau.edu.sa;isi.edu", + "email": "kau.edu.sa;isi.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "King Abdulaziz University;University of Southern California", + "aff_unique_dep": "Department of Information Technology;Information Sciences Institute", + "aff_unique_url": "https://www.kau.edu.sa;https://www.usc.edu", + "aff_unique_abbr": "KAU;USC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Saudi Arabia;United States" + }, + { + "id": "2022.findings-emnlp.149", + "title": "Self-Distillation with Meta Learning for Knowledge Graph 
Completion", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we propose a self-distillation framework with meta learning (MetaSD) for knowledge graph completion with dynamic pruning, which aims to learn compressed graph embeddings and tackle the long-tail samples. Specifically, we first propose a dynamic pruning technique to obtain a small pruned model from a large source model, where the pruning mask of the pruned model could be updated adaptively per epoch after the model weights are updated. The pruned model is supposed to be more sensitive to difficult-to-memorize samples (e.g., long-tail samples) than the source model. Then, we propose a one-step meta self-distillation method for distilling comprehensive knowledge from the source model to the pruned model, where the two models co-evolve in a dynamic manner during training. In particular, we exploit the performance of the pruned model, which is trained alongside the source model in one iteration, to improve the source model\u2019s knowledge transfer ability for the next iteration via meta learning. 
Extensive experiments show that MetaSD achieves competitive performance compared to strong baselines, while being 10x smaller than baselines.", + "author": "Yunshui Li; Junhao Liu; Min Yang; Chengming Li", + "authorids": "/y/yunshui-li/; /j/junhao-liu/; /m/min-yang/; /c/chengming-li/", + "bibtex": "@inproceedings{li-etal-2022-self,\n title = \"Self-Distillation with Meta Learning for Knowledge Graph Completion\",\n author = \"Li, Yunshui and\n Liu, Junhao and\n Yang, Min and\n Li, Chengming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.149/\",\n doi = \"10.18653/v1/2022.findings-emnlp.149\",\n pages = \"2048--2054\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.149.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.149/", + "pdf_size": 449270, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2603919197571085470&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences+University of Chinese Academy of Sciences; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences; School of Intelligent Systems Engineering, Sun Yat-sen University; Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences", + "aff_domain": "siat.ac.cn;siat.ac.cn;mail.sysu.edu.cn;siat.ac.cn", + "email": "siat.ac.cn;siat.ac.cn;mail.sysu.edu.cn;siat.ac.cn", + "github": "https://github.com/pldlgb/MetaSD", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;2;0", + "aff_unique_norm": "Shenzhen Institute of Advanced Technology;University of Chinese Academy of Sciences;Sun Yat-sen University", + "aff_unique_dep": 
";;School of Intelligent Systems Engineering", + "aff_unique_url": "http://www.siat.cas.cn;http://www.ucas.ac.cn;http://www.sysu.edu.cn/", + "aff_unique_abbr": "SIAT;UCAS;SYSU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.375", + "title": "Self-supervised Cross-modal Pretraining for Speech Emotion Recognition and Sentiment Analysis", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multimodal speech emotion recognition (SER) and sentiment analysis (SA) are important techniques for human-computer interaction. Most existing multimodal approaches utilize either shallow cross-modal fusion of pretrained features, or deep cross-modal fusion with raw features. Recently, attempts have been made to fuse pretrained feature representations in a deep fusion manner during fine-tuning stage. However those approaches have not led to improved results, partially due to their relatively simple fusion mechanisms and lack of proper cross-modal pretraining. In this work, leveraging single-modal pretrained models (RoBERTa and HuBERT), we propose a novel deeply-fused audio-text bi-modal transformer with carefully designed cross-modal fusion mechanism and a stage-wise cross-modal pretraining scheme to fully facilitate the cross-modal learning. 
Our experiment results show that the proposed method achieves state-of-the-art results on the public IEMOCAP emotion and CMU-MOSEI sentiment datasets, exceeding the previous benchmarks by a large margin.", + "author": "Iek-Heng Chu; Ziyi Chen; Xinlu Yu; Mei Han; Jing Xiao; Peng Chang", + "authorids": "/i/iek-heng-chu/; /z/ziyi-chen/; /x/xinlu-yu/; /m/mei-han/; /j/jing-xiao/; /p/peng-chang/", + "bibtex": "@inproceedings{chu-etal-2022-self,\n title = \"Self-supervised Cross-modal Pretraining for Speech Emotion Recognition and Sentiment Analysis\",\n author = \"Chu, Iek-Heng and\n Chen, Ziyi and\n Yu, Xinlu and\n Han, Mei and\n Xiao, Jing and\n Chang, Peng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.375/\",\n doi = \"10.18653/v1/2022.findings-emnlp.375\",\n pages = \"5105--5114\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.375.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.375/", + "pdf_size": 374481, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8236342918235518211&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "PAII Inc., Palo Alto, USA; PAII Inc., Palo Alto, USA; PAII Inc., Palo Alto, USA; PAII Inc., Palo Alto, USA; Ping An Technology, Shenzhen, China; PAII Inc., Palo Alto, USA", + "aff_domain": "paii-labs.com;paii-labs.com;paii-labs.com;paii-labs.com;pingan.com.cn;paii-labs.com", + "email": "paii-labs.com;paii-labs.com;paii-labs.com;paii-labs.com;pingan.com.cn;paii-labs.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "PAII Inc.;Ping An Technology", + "aff_unique_dep": ";", + "aff_unique_url": 
";https://www.pingan.com", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Palo Alto;", + "aff_country_unique_index": "0;0;0;0;1;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.321", + "title": "Self-supervised Graph Masking Pre-training for Graph-to-Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large-scale pre-trained language models (PLMs) have advanced Graph-to-Text (G2T) generation by processing the linearised version of a graph. However, the linearisation is known to ignore the structural information. Additionally, PLMs are typically pre-trained on free text which introduces domain mismatch between pre-training and downstream G2T generation tasks. To address these shortcomings, we propose graph masking pre-training strategies that neither require supervision signals nor adjust the architecture of the underlying pre-trained encoder-decoder model. When used with a pre-trained T5, our approach achieves new state-of-the-art results on WebNLG+2020 and EventNarrative G2T generation datasets. 
Our method also shows to be very effective in the low-resource setting.", + "author": "Jiuzhou Han; Ehsan Shareghi", + "authorids": "/j/jiuzhou-han/; /e/ehsan-shareghi/", + "bibtex": "@inproceedings{han-shareghi-2022-self,\n title = \"Self-supervised Graph Masking Pre-training for Graph-to-Text Generation\",\n author = \"Han, Jiuzhou and\n Shareghi, Ehsan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.321/\",\n doi = \"10.18653/v1/2022.emnlp-main.321\",\n pages = \"4845--4853\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.321.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.321/", + "pdf_size": 292963, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2343886150273505616&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University", + "aff_domain": "monash.edu;monash.edu", + "email": "monash.edu;monash.edu", + "github": "https://github.com/Jiuzhouh/Graph-Masking-Pre-training", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Monash University", + "aff_unique_dep": "Department of Data Science & AI", + "aff_unique_url": "https://www.monash.edu", + "aff_unique_abbr": "Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Australia" + }, + { + "id": "2022.findings-emnlp.141", + "title": "Self-supervised Rewiring of Pre-trained Speech Encoders:Towards Faster Fine-tuning with Less Labels in Speech Processing", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Pre-trained speech Transformers have facilitated great success across various speech processing tasks. However, fine-tuning these encoders for downstream tasks require sufficiently large training data to converge or to achieve state-of-the-art. In text domain this has been partly attributed to sub-optimality of the representation space in pre-trained Transformers. In this work, we take a sober look into pre-trained speech encoders and rewire their representation space without requiring any task-specific labels. Our method utilises neutrally synthesised version of audio inputs along with frame masking to construct positive pairs for contrastive self-supervised learning. When used for augmenting the wav2vec 2 encoder, we observe consistent improvement of isotropy in the representation space. Our experiments on 6 speech processing tasks, exhibit a significant convergence speedup during task fine-tuning as well as consistent task improvement, specially in low-resource settings.", + "author": "Hao Yang; Jinming Zhao; Gholamreza Haffari; Ehsan Shareghi", + "authorids": "/h/hao-yang/; /j/jinming-zhao/; /g/gholamreza-haffari/; /e/ehsan-shareghi/", + "bibtex": "@inproceedings{yang-etal-2022-self,\n title = \"Self-supervised Rewiring of Pre-trained Speech Encoders:Towards Faster Fine-tuning with Less Labels in Speech Processing\",\n author = \"Yang, Hao and\n Zhao, Jinming and\n Haffari, Gholamreza and\n Shareghi, Ehsan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.141/\",\n doi = \"10.18653/v1/2022.findings-emnlp.141\",\n pages = \"1952--1959\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.141.pdf", + "site": 
"https://aclanthology.org/2022.findings-emnlp.141/", + "pdf_size": 6217775, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17735659255778671605&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 4, + "aff": "Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University; Department of Data Science & AI, Monash University", + "aff_domain": "monash.edu;monash.edu;monash.edu;monash.edu", + "email": "monash.edu;monash.edu;monash.edu;monash.edu", + "github": "https://github.com/YangHao97/rewireW2V2", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Monash University", + "aff_unique_dep": "Department of Data Science & AI", + "aff_unique_url": "https://www.monash.edu", + "aff_unique_abbr": "Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Australia" + }, + { + "id": "2022.findings-emnlp.201", + "title": "Self-training with Two-phase Self-augmentation for Few-shot Dialogue Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In task-oriented dialogue systems, response generation from meaning representations (MRs) often suffers from limited training examples, due to the high cost of annotating MR-to-Text pairs. Previous works on self-training leverage fine-tuned conversational models to automatically generate pseudo-labeled MR-to-Text pairs for further fine-tuning. However, some self-augmented data may be noisy or uninformative for the model to learn from. 
In this work, we propose a two-phase self-augmentation procedure to generate high-quality pseudo-labeled MR-to-Text pairs: the first phase selects the most informative MRs based on model\u2019s prediction uncertainty; with the selected MRs, the second phase generates accurate responses by aggregating multiple perturbed latent representations from each MR. Empirical experiments on two benchmark datasets, FewShotWOZ and FewShotSGD, show that our method generally outperforms existing self-training methods on both automatic and human evaluations.", + "author": "Wanyu Du; Hanjie Chen; Yangfeng Ji", + "authorids": "/w/wanyu-du/; /h/hanjie-chen/; /y/yangfeng-ji/", + "bibtex": "@inproceedings{du-etal-2022-self,\n title = \"Self-training with Two-phase Self-augmentation for Few-shot Dialogue Generation\",\n author = \"Du, Wanyu and\n Chen, Hanjie and\n Ji, Yangfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.201/\",\n doi = \"10.18653/v1/2022.findings-emnlp.201\",\n pages = \"2770--2784\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.201.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.201/", + "pdf_size": 555779, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3843876382264881731&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, University of Virginia; Department of Computer Science, University of Virginia; Department of Computer Science, University of Virginia", + "aff_domain": "virginia.edu;virginia.edu;virginia.edu", + "email": "virginia.edu;virginia.edu;virginia.edu", + "github": "https://github.com/wyu-du/Self-Training-Dialogue-Generation", + 
"project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Virginia", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.virginia.edu", + "aff_unique_abbr": "UVA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.452", + "title": "Semantic Dependency Parsing with Edge GNNs", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Second-order neural parsers have obtained high accuracy in semantic dependency parsing. Inspired by the factor graph representation of second-order parsing, we propose edge graph neural networks (E-GNNs). In an E-GNN, each node corresponds to a dependency edge, and the neighbors are defined in terms of sibling, co-parent, and grandparent relationships. We conduct experiments on SemEval 2015 Task 18 English datasets, showing the superior performance of E-GNNs.", + "author": "Songlin Yang; Kewei Tu", + "authorids": "/s/songlin-yang/; /k/kewei-tu/", + "bibtex": "@inproceedings{yang-tu-2022-semantic,\n title = \"Semantic Dependency Parsing with Edge {GNN}s\",\n author = \"Yang, Songlin and\n Tu, Kewei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.452/\",\n doi = \"10.18653/v1/2022.findings-emnlp.452\",\n pages = \"6096--6102\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.452.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.452/", + "pdf_size": 283171, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6101883792196372805&as_sdt=2005&sciodt=0,5&hl=en", 
+ "gs_version_total": 3, + "aff": "School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging; School of Information Science and Technology, ShanghaiTech University + Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "aff_domain": "shanghaitech.edu.cn;shanghaitech.edu.cn", + "email": "shanghaitech.edu.cn;shanghaitech.edu.cn", + "github": "https://github.com/sustcsonglin/gnn-sdp", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "ShanghaiTech University;Shanghai Engineering Research Center of Intelligent Vision and Imaging", + "aff_unique_dep": "School of Information Science and Technology;", + "aff_unique_url": "https://www.shanghaitech.edu.cn;", + "aff_unique_abbr": "ShanghaiTech;", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.122", + "title": "Semantic Framework based Query Generation for Temporal Question Answering over Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Answering factual questions with temporal intent over knowledge graphs (temporal KGQA) attracts rising attention in recent years.In the generation of temporal queries, existing KGQA methods ignore the fact that some intrinsic connections between events can make them temporally related, which may limit their capability.We systematically analyze the possible interpretation of temporal constraints and conclude the interpretation structures as the Semantic Framework of Temporal Constraints, SF-TCons.Based on the semantic framework, we propose a temporal question answering method, SF-TQA, which generates query graphs by exploring the relevant facts of mentioned entities, where the exploring process is restricted by SF-TCons. 
Our evaluations show that SF-TQA significantly outperforms existing methods on two benchmarks over different knowledge graphs.", + "author": "Wentao Ding; Hao Chen; Huayu Li; Yuzhong Qu", + "authorids": "/w/wentao-ding/; /h/hao-chen/; /h/huayu-li/; /y/yuzhong-qu/", + "bibtex": "@inproceedings{ding-etal-2022-semantic,\n title = \"Semantic Framework based Query Generation for Temporal Question Answering over Knowledge Graphs\",\n author = \"Ding, Wentao and\n Chen, Hao and\n Li, Huayu and\n Qu, Yuzhong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.122/\",\n doi = \"10.18653/v1/2022.emnlp-main.122\",\n pages = \"1867--1877\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.122.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.122/", + "pdf_size": 375853, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1852410364335252236&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China; State Key Laboratory for Novel Software Technology, Nanjing University, China", + "aff_domain": "smail.nju.edu.cn;smail.nju.edu.cn;gmail.com;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;gmail.com;nju.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Nanjing University", + "aff_unique_dep": "State Key Laboratory for Novel Software Technology", + "aff_unique_url": "http://www.nju.edu.cn", + 
"aff_unique_abbr": "Nanjing U", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.627", + "title": "Semantic Novelty Detection and Characterization in Factual Text Involving Named Entities", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Much of the existing work on text novelty detection has been studied at the topic level, i.e., identifying whether the topic of a document or a sentence is novel or not. Little work has been done at the fine-grained semantic level (or contextual level). For example, given that we know Elon Musk is the CEO of a technology company, the sentence \u201cElon Musk acted in the sitcom The Big Bang Theory\u201d is novel and surprising because normally a CEO would not be an actor. Existing topic-based novelty detection methods work poorly on this problem because they do not perform semantic reasoning involving relations between named entities in the text and their background knowledge. This paper proposes an effective model (called PAT-SND) to solve the problem, which can also characterize the novelty. An annotated dataset is also created. 
Evaluation shows that PAT-SND outperforms 10 baselines by large margins.", + "author": "Nianzu Ma; Sahisnu Mazumder; Alexander Politowicz; Bing Liu; Eric Robertson; Scott Grigsby", + "authorids": "/n/nianzu-ma/; /s/sahisnu-mazumder/; /a/alexander-politowicz/; /b/bing-liu/; /e/eric-robertson/; /s/scott-grigsby/", + "bibtex": "@inproceedings{ma-etal-2022-semantic,\n title = \"Semantic Novelty Detection and Characterization in Factual Text Involving Named Entities\",\n author = \"Ma, Nianzu and\n Mazumder, Sahisnu and\n Politowicz, Alexander and\n Liu, Bing and\n Robertson, Eric and\n Grigsby, Scott\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.627/\",\n doi = \"10.18653/v1/2022.emnlp-main.627\",\n pages = \"9225--9252\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.627.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.627/", + "pdf_size": 623166, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=634851360110594933&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, University of Illinois at Chicago, USA; Intel Labs, USA; Department of Computer Science, University of Illinois at Chicago, USA; Department of Computer Science, University of Illinois at Chicago, USA; PAR Government Systems Corporation, USA; PAR Government Systems Corporation, USA", + "aff_domain": "gmail.com;gmail.com;uic.edu;uic.edu;partech.com;partech.com", + "email": "gmail.com;gmail.com;uic.edu;uic.edu;partech.com;partech.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;2;2", + "aff_unique_norm": "University of Illinois at Chicago;Intel 
Labs;PAR Government Systems Corporation", + "aff_unique_dep": "Department of Computer Science;;", + "aff_unique_url": "https://www.uic.edu;https://www.intel.com/research;", + "aff_unique_abbr": "UIC;Intel;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Chicago;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.313", + "title": "Semantic Role Labeling Meets Definition Modeling: Using Natural Language to Describe Predicate-Argument Structures", + "track": "main", + "status": "finding", + "award": false, + "abstract": "One of the common traits of past and present approaches for Semantic Role Labeling (SRL) is that they rely upon discrete labels drawn from a predefined linguistic inventory to classify predicate senses and their arguments.However, we argue this need not be the case. In this paper, we present an approach that leverages Definition Modeling to introduce a generalized formulation of SRL as the task of describing predicate-argument structures using natural language definitions instead of discrete labels. Our novel formulation takes a first step towards placing interpretability and flexibility foremost, and yet our experiments and analyses on PropBank-style and FrameNet-style, dependency-based and span-based SRL also demonstrate that a flexible model with an interpretable output does not necessarily come at the expense of performance. 
We release our software for research purposes at https://github.com/SapienzaNLP/dsrl.", + "author": "Simone Conia; Edoardo Barba; Alessandro Scir\u00e8; Roberto Navigli", + "authorids": "/s/simone-conia/; /e/edoardo-barba/; /a/alessandro-scire/; /r/roberto-navigli/", + "bibtex": "@inproceedings{conia-etal-2022-semantic,\n title = \"Semantic Role Labeling Meets Definition Modeling: Using Natural Language to Describe Predicate-Argument Structures\",\n author = \"Conia, Simone and\n Barba, Edoardo and\n Scir{\\`e}, Alessandro and\n Navigli, Roberto\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.313/\",\n doi = \"10.18653/v1/2022.findings-emnlp.313\",\n pages = \"4253--4270\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.313.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.313/", + "pdf_size": 774235, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7131735664708809230&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Sapienza University of Rome; Sapienza University of Rome; Sapienza University of Rome + Babelscape, Italy; Sapienza University of Rome", + "aff_domain": "uniroma1.it;uniroma1.it;babelscape.com;uniroma1.it", + "email": "uniroma1.it;uniroma1.it;babelscape.com;uniroma1.it", + "github": "https://github.com/SapienzaNLP/dsrl", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0+1;0", + "aff_unique_norm": "Sapienza University of Rome;Babelscape", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.uniroma1.it;", + "aff_unique_abbr": "Sapienza;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Rome;", + "aff_country_unique_index": 
"0;0;0+0;0", + "aff_country_unique": "Italy" + }, + { + "id": "2022.emnlp-main.757", + "title": "Semantic Simplification for Sentiment Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work on document-level sentiment classification has shown that the sentiment in the original text is often hard to capture, since the sentiment is usually either expressed implicitly or shifted due to the occurrences of negation and rhetorical words. To this end, we enhance the original text with a sentiment-driven simplified clause to intensify its sentiment. The simplified clause shares the same opinion with the original text but expresses the opinion much more simply. Meanwhile, we employ Abstract Meaning Representation (AMR) for generating simplified clauses, since AMR explicitly provides core semantic knowledge, and potentially offers core concepts and explicit structures of original texts. Empirical studies show the effectiveness of our proposed model over several strong baselines. 
The results also indicate the importance of simplified clauses for sentiment classification.", + "author": "Xiaotong Jiang; Zhongqing Wang; Guodong Zhou", + "authorids": "/x/xiaotong-jiang/; /z/zhongqing-wang/; /g/guodong-zhou/", + "bibtex": "@inproceedings{jiang-etal-2022-semantic,\n title = \"Semantic Simplification for Sentiment Classification\",\n author = \"Jiang, Xiaotong and\n Wang, Zhongqing and\n Zhou, Guodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.757/\",\n doi = \"10.18653/v1/2022.emnlp-main.757\",\n pages = \"11022--11032\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.757.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.757/", + "pdf_size": 673888, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2044060933644783528&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China; Natural Language Processing Lab, Soochow University, Suzhou, China", + "aff_domain": "outlook.com;suda.edu.cn;suda.edu.cn", + "email": "outlook.com;suda.edu.cn;suda.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Soochow University", + "aff_unique_dep": "Natural Language Processing Lab", + "aff_unique_url": "http://www.soochow.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Suzhou", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.269", + "title": "Semantic-aware Contrastive Learning for 
More Accurate Semantic Parsing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Since the meaning representations are detailed and accurate annotations which express fine-grained sequence-level semtantics, it is usually hard to train discriminative semantic parsers via Maximum Likelihood Estimation (MLE) in an autoregressive fashion. In this paper, we propose a semantic-aware contrastive learning algorithm, which can learn to distinguish fine-grained meaning representations and take the overall sequence-level semantic into consideration. Specifically, a multi-level online sampling algorithm is proposed to sample confusing and diverse instances. Three semantic-aware similarity functions are designed to accurately measure the distance between meaning representations as a whole. And a ranked contrastive loss is proposed to pull the representations of the semantic-identical instances together and push negative instances away. Experiments on two standard datasets show that our approach achieves significant improvements over MLE baselines and gets state-of-the-art performances by simply applying semantic-aware contrastive learning on a vanilla Seq2Seq model.", + "author": "Shan Wu; Chunlei Xin; Bo Chen; Xianpei Han; Le Sun", + "authorids": "/s/shan-wu/; /c/chunlei-xin/; /b/bo-chen/; /x/xianpei-han/; /l/le-sun/", + "bibtex": "https://aclanthology.org/2022.emnlp-main.269.bib", + "pdf": "https://aclanthology.org/2022.emnlp-main.269.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.269/", + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10858316783287405360&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.findings-emnlp.290", + "title": "Semi-Supervised Lifelong Language Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Lifelong 
learning aims to accumulate knowledge and alleviate catastrophic forgetting when learning tasks sequentially. However, existing lifelong language learning methods only focus on the supervised learning setting. Unlabeled data, which can be easily accessed in real-world scenarios, are underexplored. In this paper, we explore a novel setting, semi-supervised lifelong language learning (SSLL), where a model learns sequentially arriving language tasks with both labeled and unlabeled data. We propose an unlabeled data enhanced lifelong learner to explore SSLL. Specially, we dedicate task-specific modules to alleviate catastrophic forgetting and design two modules to exploit unlabeled data: (1) a virtual supervision enhanced task solver is constructed on a teacher-student framework to mine the underlying knowledge from unlabeled data; and (2) a backward augmented learner is built to encourage knowledge transfer from newly arrived unlabeled data to previous tasks. Experimental results on various language tasks demonstrate our model\u2019s effectiveness and superiority over competitive baselines under the new setting SSLL.", + "author": "Yingxiu Zhao; Yinhe Zheng; Bowen Yu; Zhiliang Tian; Dongkyu Lee; Jian Sun; Yongbin Li; Nevin L. 
Zhang", + "authorids": "/y/yingxiu-zhao/; /y/yinhe-zheng/; /b/bowen-yu/; /z/zhiliang-tian/; /d/dongkyu-lee/; /j/jian-sun/; /y/yongbin-li/; /n/nevin-l-zhang/", + "bibtex": "@inproceedings{zhao-etal-2022-semi,\n title = \"Semi-Supervised Lifelong Language Learning\",\n author = \"Zhao, Yingxiu and\n Zheng, Yinhe and\n Yu, Bowen and\n Tian, Zhiliang and\n Lee, Dongkyu and\n Sun, Jian and\n Li, Yongbin and\n Zhang, Nevin L.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.290/\",\n doi = \"10.18653/v1/2022.findings-emnlp.290\",\n pages = \"3937--3951\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.290.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.290/", + "pdf_size": 935779, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9364374023807673959&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/ssll", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-industry.26", + "title": "Semi-supervised Adversarial Text Generation based on Seq2Seq models", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "To improve deep learning models\u2019 robustness, adversarial training has been frequently used in computer vision with satisfying results. However, adversarial perturbation on text have turned out to be more challenging due to the discrete nature of text. 
The generated adversarial text might not sound natural or does not preserve semantics, which is the key for real world applications where text classification is based on semantic meaning. In this paper, we describe a new way for generating adversarial samples by using pseudo-labeled in-domain text data to train a seq2seq model for adversarial generation and combine it with paraphrase detection. We showcase the benefit of our approach for a real-world Natural Language Understanding (NLU) task, which maps a user\u2019s request to an intent. Furthermore, we experiment with gradient-based training for the NLU task and try using token importance scores to guide the adversarial text generation. We show that our approach can generate realistic and relevant adversarial samples compared to other state-of-the-art adversarial training methods. Applying adversarial training using these generated samples helps the NLU model to recover up to 70% of these types of errors and makes the model more robust, especially in the tail distribution in a large scale real world application.", + "author": "Hieu Le; Dieu-thu Le; Verena Weber; Chris Church; Kay Rottmann; Melanie Bradford; Peter Chin", + "authorids": "/h/hieu-le/; /d/dieu-thu-le/; /v/verena-weber/; /c/chris-church/; /k/kay-rottmann/; /m/melanie-bradford/; /p/peter-chin/", + "bibtex": "@inproceedings{le-etal-2022-semi,\n title = \"Semi-supervised Adversarial Text Generation based on {S}eq2{S}eq models\",\n author = \"Le, Hieu and\n Le, Dieu-thu and\n Weber, Verena and\n Church, Chris and\n Rottmann, Kay and\n Bradford, Melanie and\n Chin, Peter\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.26/\",\n doi = 
\"10.18653/v1/2022.emnlp-industry.26\",\n pages = \"254--262\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.26.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.26/", + "pdf_size": 648882, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10924246938867067306&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Boston University, Boston, USA + Amazon Inc., Berlin, Germany; Amazon Inc., Berlin, Germany; Amazon Inc., Berlin, Germany; Amazon Inc., Berlin, Germany; Amazon Inc., Berlin, Germany; Amazon Inc., Berlin, Germany; Boston University, Boston, USA", + "aff_domain": "bu.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;bu.edu", + "email": "bu.edu;amazon.com;amazon.com;amazon.com;amazon.com;amazon.com;bu.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;1;1;1;1;0", + "aff_unique_norm": "Boston University;Amazon", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.bu.edu;https://www.amazon.de", + "aff_unique_abbr": "BU;Amazon", + "aff_campus_unique_index": "0+1;1;1;1;1;1;0", + "aff_campus_unique": "Boston;Berlin", + "aff_country_unique_index": "0+1;1;1;1;1;1;0", + "aff_country_unique": "United States;Germany" + }, + { + "id": "2022.findings-emnlp.462", + "title": "Semi-supervised New Slot Discovery with Incremental Clustering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Discovering new slots is critical to the success of dialogue systems. Most existing methods rely on automatic slot induction in unsupervised fashion or perform domain adaptation across zero or few-shot scenarios. They have difficulties in providing high-quality supervised signals to learn clustering-friendly features, and are limited in effectively transferring the prior knowledge from known slots to new slots. 
In this work, we propose a Semi-supervised Incremental Clustering method (SIC), to discover new slots with the aid of existing linguistic annotation models and limited known slot data. Specifically, we harvest slot value candidates with NLP model cues and innovatively formulate the slot discovery task under an incremental clustering framework. The model gradually calibrate slot representations under the supervision of generated pseudo-labels, and automatically learns to terminate when no more salient slot remains. Our thorough evaluation on five public datasets demonstrates that it significantly outperforms state-of-the-art models.", + "author": "Yuxia Wu; Lizi Liao; Xueming Qian; Tat-Seng Chua", + "authorids": "/y/yuxia-wu/; /l/lizi-liao/; /x/xueming-qian/; /t/tat-seng-chua/", + "bibtex": "@inproceedings{wu-etal-2022-semi,\n title = \"Semi-supervised New Slot Discovery with Incremental Clustering\",\n author = \"Wu, Yuxia and\n Liao, Lizi and\n Qian, Xueming and\n Chua, Tat-Seng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.462/\",\n doi = \"10.18653/v1/2022.findings-emnlp.462\",\n pages = \"6207--6218\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.462.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.462/", + "pdf_size": 10418532, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15870017078254745182&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Xi\u2019an Jiaotong University; Singapore Management University; Xi\u2019an Jiaotong University; Sea-NExT Joint Lab, NUS", + "aff_domain": "stu.xjtu.edu.cn;smu.edu.sg;mail.xjtu.edu.cn;nus.edu.sg", + "email": 
"stu.xjtu.edu.cn;smu.edu.sg;mail.xjtu.edu.cn;nus.edu.sg", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;2", + "aff_unique_norm": "Xi'an Jiaotong University;Singapore Management University;National University of Singapore", + "aff_unique_dep": ";;Sea-NExT Joint Lab", + "aff_unique_url": "https://www.xjtu.edu.cn;https://www.smu.edu.sg;https://www.nus.edu.sg", + "aff_unique_abbr": "XJTU;SMU;NUS", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;1", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.findings-emnlp.338", + "title": "SensePOLAR: Word sense aware interpretability for pre-trained contextual word embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Adding interpretability to word embeddings represents an area of active research in textrepresentation. Recent work has explored the potential of embedding words via so-called polardimensions (e.g. good vs. bad, correct vs. wrong). Examples of such recent approachesinclude SemAxis, POLAR, FrameAxis, and BiImp. Although these approaches provide interpretabledimensions for words, they have not been designed to deal with polysemy, i.e. they can not easily distinguish between different senses of words. To address this limitation, we present SensePOLAR, an extension of the original POLAR framework that enables wordsense aware interpretability for pre-trained contextual word embeddings. The resulting interpretable word embeddings achieve a level ofperformance that is comparable to original contextual word embeddings across a variety ofnatural language processing tasks including the GLUE and SQuAD benchmarks. 
Our workremoves a fundamental limitation of existing approaches by offering users sense aware interpretationsfor contextual word embeddings.", + "author": "Jan Engler; Sandipan Sikdar; Marlene Lutz; Markus Strohmaier", + "authorids": "/j/jan-engler/; /s/sandipan-sikdar/; /m/marlene-lutz/; /m/markus-strohmaier/", + "bibtex": "@inproceedings{engler-etal-2022-sensepolar,\n title = \"{S}ense{POLAR}: Word sense aware interpretability for pre-trained contextual word embeddings\",\n author = \"Engler, Jan and\n Sikdar, Sandipan and\n Lutz, Marlene and\n Strohmaier, Markus\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.338/\",\n doi = \"10.18653/v1/2022.findings-emnlp.338\",\n pages = \"4607--4619\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.338.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.338/", + "pdf_size": 595481, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15268581106609552549&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "RWTH Aachen; L3S Research Center; University of Mannheim; University of Mannheim+GESIS+CSH Vienna", + "aff_domain": "rwth-aachen.de;l3s.de;uni-mannheim.de;uni-mannheim.de", + "email": "rwth-aachen.de;l3s.de;uni-mannheim.de;uni-mannheim.de", + "github": "https://github.com/JanEnglerRWTH/4607", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;2+3+4", + "aff_unique_norm": "RWTH Aachen University;L3S Research Center;University of Mannheim;GESIS - Leibniz-Institut f\u00fcr Sozialwissenschaften e.V.;Complexity Science Hub Vienna", + "aff_unique_dep": ";;;;", + "aff_unique_url": 
"https://www.rwth-aachen.de;https://www.l3s.de;https://www.uni-mannheim.de;https://www.gesis.org/;https://www.csh.ac.at", + "aff_unique_abbr": "RWTH;;UM;GESIS;CSH", + "aff_campus_unique_index": "0;", + "aff_campus_unique": "Aachen;", + "aff_country_unique_index": "0;0;0;0+0+1", + "aff_country_unique": "Germany;Austria" + }, + { + "id": "2022.emnlp-main.699", + "title": "SentBS: Sentence-level Beam Search for Controllable Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "A wide range of control perspectives have been explored in controllable text generation. Structure-controlled summarization is recently proposed as a useful and interesting research direction. However, current structure-controlling methods have limited effectiveness in enforcing the desired structure. To address this limitation, we propose a sentence-level beam search generation method (SentBS), where evaluation is conducted throughout the generation process to select suitable sentences for subsequent generations. We experiment with different combinations of decoding methods to be used as sub-components by SentBS and evaluate results on the structure-controlled dataset MReD. 
Experiments show that all explored combinations for SentBS can improve the agreement between the generated text and the desired structure, with the best method significantly reducing the structural discrepancies suffered by the existing model, by approximately 68%.", + "author": "Chenhui Shen; Liying Cheng; Lidong Bing; Yang You; Luo Si", + "authorids": "/c/chenhui-shen/; /l/liying-cheng/; /l/lidong-bing/; /y/yang-you/; /l/luo-si/", + "bibtex": "@inproceedings{shen-etal-2022-sentbs,\n title = \"{S}ent{BS}: Sentence-level Beam Search for Controllable Summarization\",\n author = \"Shen, Chenhui and\n Cheng, Liying and\n Bing, Lidong and\n You, Yang and\n Si, Luo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.699/\",\n doi = \"10.18653/v1/2022.emnlp-main.699\",\n pages = \"10256--10265\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.699.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.699/", + "pdf_size": 324658, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12429754308017798060&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "DAMO Academy, Alibaba Group+National University of Singapore+Singapore University of Technology and Design; DAMO Academy, Alibaba Group+Singapore University of Technology and Design; DAMO Academy, Alibaba Group; National University of Singapore; DAMO Academy, Alibaba Group", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;comp.nus.edu.sg;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;comp.nus.edu.sg;alibaba-inc.com", + "github": "https://github.com/Shen-Chenhui/SentBS", + "project": "", + "author_num": 
5, + "aff_unique_index": "0+1+2;0+2;0;1;0", + "aff_unique_norm": "Alibaba Group;National University of Singapore;Singapore University of Technology and Design", + "aff_unique_dep": "DAMO Academy;;", + "aff_unique_url": "https://www.alibaba-group.com;https://www.nus.edu.sg;https://www.sutd.edu.sg", + "aff_unique_abbr": "Alibaba;NUS;SUTD", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+1;0+1;0;1;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.221", + "title": "Sentence Representation Learning with Generative Objective rather than Contrastive Objective", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Though offering amazing contextualized token-level representations, current pre-trained language models take less attention on accurately acquiring sentence-level representation during their self-supervised pre-training. However, contrastive objectives which dominate the current sentence representation learning bring little linguistic interpretability and no performance guarantee on downstream semantic tasks. We instead propose a novel generative self-supervised learning objective based on phrase reconstruction. To overcome the drawbacks of previous generative methods, we carefully model intra-sentence structure by breaking down one sentence into pieces of important phrases. Empirical studies show that our generative learning achieves powerful enough performance improvement and outperforms the current state-of-the-art contrastive methods not only on the STS benchmarks, but also on downstream semantic retrieval and reranking tasks. 
Our code is available at https://github.com/chengzhipanpan/PaSeR.", + "author": "Bohong Wu; Hai Zhao", + "authorids": "/b/bohong-wu/; /h/hai-zhao/", + "bibtex": "@inproceedings{wu-zhao-2022-sentence,\n title = \"Sentence Representation Learning with Generative Objective rather than Contrastive Objective\",\n author = \"Wu, Bohong and\n Zhao, Hai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.221/\",\n doi = \"10.18653/v1/2022.emnlp-main.221\",\n pages = \"3356--3368\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.221.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.221/", + "pdf_size": 479946, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16547125315951344872&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University; Department of Computer Science and Engineering, Shanghai Jiao Tong University+Key Laboratory of Shanghai Education Commission for Intelligent Interaction and Cognitive Engineering, Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "https://github.com/chengzhipanpan/PaSeR", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "Department of Computer Science and Engineering", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "1;1", + 
"aff_campus_unique": ";Shanghai", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.28", + "title": "Sentence-Incremental Neural Coreference Resolution", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a sentence-incremental neural coreference resolution system which incrementally builds clusters after marking mention boundaries in a shift-reduce method. The system is aimed at bridging two recent approaches at coreference resolution: (1) state-of-the-art non-incremental models that incur quadratic complexity in document length with high computational cost, and (2) memory network-based models which operate incrementally but do not generalize beyond pronouns. For comparison, we simulate an incremental setting by constraining non-incremental systems to form partial coreference chains before observing new sentences. In this setting, our system outperforms comparable state-of-the-art methods by 2 F1 on OntoNotes and 6.8 F1 on the CODI-CRAC 2021 corpus. In a conventional coreference setup, our system achieves 76.3 F1 on OntoNotes and 45.5 F1 on CODI-CRAC 2021, which is comparable to state-of-the-art baselines. We also analyze variations of our system and show that the degree of incrementality in the encoder has a surprisingly large effect on the resulting performance.", + "author": "Matt Grenander; Shay B. Cohen; Mark Steedman", + "authorids": "/m/matt-grenander/; /s/shay-b-cohen/; /m/mark-steedman/", + "bibtex": "@inproceedings{grenander-etal-2022-sentence,\n title = \"Sentence-Incremental Neural Coreference Resolution\",\n author = \"Grenander, Matt and\n Cohen, Shay B. 
and\n Steedman, Mark\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.28/\",\n doi = \"10.18653/v1/2022.emnlp-main.28\",\n pages = \"427--443\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.28.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.28/", + "pdf_size": 385200, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6188650666390132541&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh; School of Informatics, University of Edinburgh", + "aff_domain": "ed.ac.uk;inf.ed.ac.uk;ed.ac.uk", + "email": "ed.ac.uk;inf.ed.ac.uk;ed.ac.uk", + "github": "https://github.com/mgrenander/sentence-incremental-coref", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Edinburgh", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.ed.ac.uk", + "aff_unique_abbr": "Edinburgh", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Edinburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.682", + "title": "Sentence-level Media Bias Analysis Informed by Discourse Structures", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As polarization continues to rise among both the public and the news media, increasing attention has been devoted to detecting media bias. Most recent work in the NLP community, however, identify bias at the level of individual articles. 
However, each article itself comprises multiple sentences, which vary in their ideological bias. In this paper, we aim to identify sentences within an article that can illuminate and explain the overall bias of the entire article. We show that understanding the discourse role of a sentence in telling a news story, as well as its relation with nearby sentences, can reveal the ideological leanings of an author even when the sentence itself appears merely neutral. In particular, we consider using a functional news discourse structure and PDTB discourse relations to inform bias sentence identification, and distill the auxiliary knowledge from the two types of discourse structure into our bias sentence identification system. Experimental results on benchmark datasets show that incorporating both the global functional discourse structure and local rhetorical discourse relations can effectively increase the recall of bias sentence identification by 8.27% - 8.62%, as well as increase the precision by 2.82% - 3.48%.", + "author": "Yuanyuan Lei; Ruihong Huang; Lu Wang; Nick Beauchamp", + "authorids": "/y/yuanyuan-lei/; /r/ruihong-huang/; /l/lu-wang/; /n/nick-beauchamp/", + "bibtex": "@inproceedings{lei-etal-2022-sentence,\n title = \"Sentence-level Media Bias Analysis Informed by Discourse Structures\",\n author = \"Lei, Yuanyuan and\n Huang, Ruihong and\n Wang, Lu and\n Beauchamp, Nick\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.682/\",\n doi = \"10.18653/v1/2022.emnlp-main.682\",\n pages = \"10040--10050\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.682.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.682/", + "pdf_size": 
335078, + "gs_citation": 37, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=500605816471788480&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Computer Science and Engineering, Texas A&M University; Computer Science and Engineering, Texas A&M University; Computer Science and Engineering, University of Michigan; Political Science, Northeastern University", + "aff_domain": "tamu.edu;tamu.edu;umich.edu;northeastern.edu", + "email": "tamu.edu;tamu.edu;umich.edu;northeastern.edu", + "github": "https://github.com/yuanyuanlei-nlp/bias_sentence_discourse_emnlp_2022", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;2", + "aff_unique_norm": "Texas A&M University;University of Michigan;Northeastern University", + "aff_unique_dep": "Computer Science and Engineering;Computer Science and Engineering;Political Science", + "aff_unique_url": "https://www.tamu.edu;https://www.umich.edu;https://www.northeastern.edu", + "aff_unique_abbr": "TAMU;UM;NU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Ann Arbor", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.332", + "title": "Sentiment-Aware Word and Sentence Level Pre-training for Sentiment Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most existing pre-trained language representation models (PLMs) are sub-optimal in sentiment analysis tasks, as they capture the sentiment information from word-level while under-considering sentence-level information. 
In this paper, we propose SentiWSP, a novel Sentiment-aware pre-trained language model with combined Word-level and Sentence-level Pre-training tasks.The word level pre-training task detects replaced sentiment words, via a generator-discriminator framework, to enhance the PLM\u2019s knowledge about sentiment words.The sentence level pre-training task further strengthens the discriminator via a contrastive learning framework, with similar sentences as negative samples, to encode sentiments in a sentence.Extensive experimental results show that SentiWSP achieves new state-of-the-art performance on various sentence-level and aspect-level sentiment classification benchmarks. We have made our code and model publicly available at https://github.com/XMUDM/SentiWSP.", + "author": "Shuai Fan; Chen Lin; Haonan Li; Zhenghao Lin; Jinsong Su; Hang Zhang; Yeyun Gong; JIan Guo; Nan Duan", + "authorids": "/s/shuai-fan/; /c/chen-lin/; /h/haonan-li/; /z/zhenghao-lin/; /j/jinsong-su/; /h/hang-zhang/; /y/yeyun-gong/; /j/jian-guo/; /n/nan-duan/", + "bibtex": "@inproceedings{fan-etal-2022-sentiment,\n title = \"Sentiment-Aware Word and Sentence Level Pre-training for Sentiment Analysis\",\n author = \"Fan, Shuai and\n Lin, Chen and\n Li, Haonan and\n Lin, Zhenghao and\n Su, Jinsong and\n Zhang, Hang and\n Gong, Yeyun and\n Guo, JIan and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.332/\",\n doi = \"10.18653/v1/2022.emnlp-main.332\",\n pages = \"4984--4994\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.332.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.332/", + "pdf_size": 399188, + "gs_citation": 25, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3413869858284416438&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; The University of Melbourne, Australia; School of Informatics, Xiamen University, China; School of Informatics, Xiamen University, China; IDEA Research, China; Microsoft Research Asia; IDEA Research, China; Microsoft Research Asia", + "aff_domain": "xmu.edu.cn;xmu.edu.cn;unimelb.edu.au;xmu.edu.cn;xmu.edu.cn;idea.edu.cn;microsoft.com;idea.edu.cn;microsoft.com", + "email": "xmu.edu.cn;xmu.edu.cn;unimelb.edu.au;xmu.edu.cn;xmu.edu.cn;idea.edu.cn;microsoft.com;idea.edu.cn;microsoft.com", + "github": "https://github.com/XMUDM/SentiWSP", + "project": "", + "author_num": 9, + "aff_unique_index": "0;0;1;0;0;2;3;2;3", + "aff_unique_norm": "Xiamen University;The University of Melbourne;IDEA Research;Microsoft Research", + "aff_unique_dep": "School of Informatics;;;Research", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.unimelb.edu.au;;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "XMU;UniMelb;;MSR Asia", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;1;0;0;0;0;0;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.findings-emnlp.288", + "title": "SepLL: Separating Latent Class Labels from Weak Supervision Noise", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In the weakly supervised learning paradigm, labeling functions automatically assign heuristic, often noisy, labels to data samples. In this work, we provide a method for learning from weak labels by separating two types of complementary information associated with the labeling functions: information related to the target label and information specific to one labeling function only. 
Both types of information are reflected to different degrees by all labeled instances. In contrast to previous works that aimed at correcting or removing wrongly labeled instances, we learn a branched deep model that uses all data as-is, but splits the labeling function information in the latent space. Specifically, we propose the end-to-end model SepLL which extends a transformer classifier by introducing a latent space for labeling function specific and task-specific information. The learning signal is only given by the labeling functions matches, no pre-processing or label model is required for our method. Notably, the task prediction is made from the latent layer without any direct task signal. Experiments on Wrench text classification tasks show that our model is competitive with the state-of-the-art, and yields a new best average performance.", + "author": "Andreas Stephan; Vasiliki Kougia; Benjamin Roth", + "authorids": "/a/andreas-stephan/; /v/vasiliki-kougia/; /b/benjamin-roth/", + "bibtex": "@inproceedings{stephan-etal-2022-sepll,\n title = \"{S}ep{LL}: Separating Latent Class Labels from Weak Supervision Noise\",\n author = \"Stephan, Andreas and\n Kougia, Vasiliki and\n Roth, Benjamin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.288/\",\n doi = \"10.18653/v1/2022.findings-emnlp.288\",\n pages = \"3918--3929\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.288.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.288/", + "pdf_size": 473598, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7066798782591908598&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 6, + "aff": "Research 
Group Data Mining and Machine Learning, Faculty of Computer Science, University of Vienna, Vienna, Austria + UniVie Doctoral School Computer Science, Vienna, Austria; Research Group Data Mining and Machine Learning, Faculty of Computer Science, University of Vienna, Vienna, Austria + UniVie Doctoral School Computer Science, Vienna, Austria; Faculty of Philological and Cultural Studies, University of Vienna, Vienna, Austria", + "aff_domain": "univie.ac.at;univie.ac.at;univie.ac.at", + "email": "univie.ac.at;univie.ac.at;univie.ac.at", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;0", + "aff_unique_norm": "University of Vienna", + "aff_unique_dep": "Faculty of Computer Science", + "aff_unique_url": "https://www.univie.ac.at", + "aff_unique_abbr": "Uni Vienna", + "aff_campus_unique_index": "0+0;0+0;0", + "aff_campus_unique": "Vienna", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "Austria" + }, + { + "id": "2022.emnlp-main.620", + "title": "Sequence Models for Document Structure Identification in an Undeciphered Script", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This work describes the first thorough analysis of \u201cheader\u201d signs in proto-Elamite, an undeciphered script from 3100-2900 BCE. Headers are a category of signs which have been provisionally identified through painstaking manual analysis of this script by domain experts. We use unsupervised neural and statistical sequence modeling techniques to provide new and independent evidence for the existence of headers, without supervision from domain experts. 
Having affirmed the existence of headers as a legitimate structural feature, we next arrive at a richer understanding of their possible meaning and purpose by (i) examining which features predict their presence; (ii) identifying correlations between these features and other document properties; and (iii) examining cases where these features predict the presence of a header in texts where domain experts do not expect one (or vice versa). We provide more concrete processes for labeling headers in this corpus and a clearer justification for existing intuitions about document structure in proto-Elamite.", + "author": "Logan Born; M. Monroe; Kathryn Kelley; Anoop Sarkar", + "authorids": "/l/logan-born/; /m/m-monroe/; /k/kathryn-kelley/; /a/anoop-sarkar/", + "bibtex": "@inproceedings{born-etal-2022-sequence,\n title = \"Sequence Models for Document Structure Identification in an Undeciphered Script\",\n author = \"Born, Logan and\n Monroe, M. and\n Kelley, Kathryn and\n Sarkar, Anoop\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.620/\",\n doi = \"10.18653/v1/2022.emnlp-main.620\",\n pages = \"9111--9121\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.620.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.620/", + "pdf_size": 1385278, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3085608014150589624&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Simon Fraser University School of Computing Science; University of British Columbia Department of Philosophy; Universit\u00e0 di Bologna Dipartimento di Filologia Classica e Italianistica; Simon Fraser University School of Computing 
Science", + "aff_domain": "sfu.ca;ubc.ca;unibo.it;cs.sfu.ca", + "email": "sfu.ca;ubc.ca;unibo.it;cs.sfu.ca", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Simon Fraser University;University of British Columbia;Universit\u00e0 di Bologna", + "aff_unique_dep": "School of Computing Science;Department of Philosophy;Dipartimento di Filologia Classica e Italianistica", + "aff_unique_url": "https://www.sfu.ca;https://www.ubc.ca;https://www.unibo.it", + "aff_unique_abbr": "SFU;UBC;Unibo", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "Canada;Italy" + }, + { + "id": "2022.findings-emnlp.87", + "title": "Sequential Topic Selection Model with Latent Variable for Topic-Grounded Dialogue", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, topic-grounded dialogue system has attracted significant attention due to its effectiveness in predicting the next topic to yield better responses via the historical context and given topic sequence. However, almost all existing topic prediction solutions focus on only the current conversation and corresponding topic sequence to predict the next conversation topic, without exploiting other topic-guided conversations which may contain relevant topic-transitions to current conversation. To address the problem, in this paper we propose a novel approach, named Sequential Global Topic Attention (SGTA) to exploit topic transition over all conversations in a subtle way for better modeling post-to-response topic-transition and guiding the response generation to the current conversation. Specifically, we introduce a latent space modeled as a Multivariate Skew-Normal distribution with hybrid kernel functions to flexibly integrate the global-level information with sequence-level information, and predict the topic based on the distribution sampling results. 
We also leverage a topic-aware prior-posterior approach for secondary selection of predicted topics, which is utilized to optimize the response generation task. Extensive experiments demonstrate that our model outperforms competitive baselines on prediction and generation tasks.", + "author": "Xiao-Fei Wen; Wei Wei; Xian-Ling Mao", + "authorids": "/x/xiao-fei-wen/; /w/wei-wei/; /x/xian-ling-mao/", + "bibtex": "@inproceedings{wen-etal-2022-sequential,\n title = \"Sequential Topic Selection Model with Latent Variable for Topic-Grounded Dialogue\",\n author = \"Wen, Xiao-Fei and\n Wei, Wei and\n Mao, Xian-Ling\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.87/\",\n doi = \"10.18653/v1/2022.findings-emnlp.87\",\n pages = \"1209--1219\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.87.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.87/", + "pdf_size": 1436101, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1391572056581888164&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Cognitive Computing and Intelligent Information Processing Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Cognitive Computing and Intelligent Information Processing Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology + Joint Laboratory of HUST and Pingan Property & Casualty Research (HPL); Department of Computer Science and Technology, Beijing Institute of Technology", + "aff_domain": "hust.edu.cn;hust.edu.cn;bit.edu.cn", + "email": 
"hust.edu.cn;hust.edu.cn;bit.edu.cn", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0+0;1", + "aff_unique_norm": "Huazhong University of Science and Technology;Beijing Institute of Technology", + "aff_unique_dep": "School of Computer Science and Technology;Department of Computer Science and Technology", + "aff_unique_url": "http://www.hust.edu.cn;http://www.bit.edu.cn/", + "aff_unique_abbr": "HUST;BIT", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.509", + "title": "Sequentially Controlled Text Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "While GPT-2 generates sentences that are remarkably human-like, longer documents can ramble and do not follow human-like writing structure. We study the problem of imposing structure on long-range text. We propose a novel controlled text generation task, sequentially controlled text generation, and identify a dataset, NewsDiscourse as a starting point for this task. We develop a sequential controlled text generation pipeline with generation and editing. 
We test different degrees of structural awareness and show that, in general, more structural awareness results in higher control- accuracy, grammaticality, coherency and topicality, approaching human-level writing performance.", + "author": "Alexander Spangher; Yao Ming; Xinyu Hua; Nanyun Peng", + "authorids": "/a/alexander-spangher/; /y/yao-ming/; /x/xinyu-hua/; /n/nanyun-peng/", + "bibtex": "@inproceedings{spangher-etal-2022-sequentially,\n title = \"Sequentially Controlled Text Generation\",\n author = \"Spangher, Alexander and\n Ming, Yao and\n Hua, Xinyu and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.509/\",\n doi = \"10.18653/v1/2022.findings-emnlp.509\",\n pages = \"6848--6866\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.509.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.509/", + "pdf_size": 1509631, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16809782162649257972&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Southern California; Bloomberg; Bloomberg; University of California Los Angeles", + "aff_domain": "usc.edu;bloomberg.net;bloomberg.net;cs.ucla.edu", + "email": "usc.edu;bloomberg.net;bloomberg.net;cs.ucla.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;2", + "aff_unique_norm": "University of Southern California;Bloomberg;University of California, Los Angeles", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usc.edu;https://www.bloomberg.com;https://www.ucla.edu", + "aff_unique_abbr": "USC;Bloomberg;UCLA", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Los 
Angeles;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.200", + "title": "SetGNER: General Named Entity Recognition as Entity Set Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, joint recognition of flat, nested and discontinuous entities has received increasing attention. Motivated by the observation that the target output of NER is essentially a set of sequences, we propose a novel entity set generation framework for general NER scenes in this paper. Different from sequence-to-sequence NER methods, our method does not force the entities to be generated in a predefined order and can get rid of the problem of error propagation and inefficient decoding. Distinguished from the set-prediction NER framework, our method treats each entity as a sequence and is capable of recognizing discontinuous mentions. Given an input sentence, the model first encodes the sentence in word-level and detects potential entity mentions based on the encoder\u2019s output, then reconstructs entity mentions from the detected entity heads in parallel. To let the encoder of our model capture better right-to-left semantic structure, we also propose an auxiliary Inverse Generation Training task. Extensive experiments show that our model (w/o. Inverse Generation Training) outperforms state-of-the-art generative NER models by a large margin on two discontinuous NER datasets, two nested NER datasets and one flat NER dataset. 
Besides, the auxiliary Inverse Generation Training task is found to further improve the model\u2019s performance on the five datasets.", + "author": "Yuxin He; Buzhou Tang", + "authorids": "/y/yuxin-he/; /b/buzhou-tang/", + "bibtex": "@inproceedings{he-tang-2022-setgner,\n title = \"{S}et{GNER}: General Named Entity Recognition as Entity Set Generation\",\n author = \"He, Yuxin and\n Tang, Buzhou\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.200/\",\n doi = \"10.18653/v1/2022.emnlp-main.200\",\n pages = \"3074--3085\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.200.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.200/", + "pdf_size": 987774, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=709554470641079224&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 0, + "aff": "Department of Computer Science, Harbin Institute of Technology, Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China; Department of Computer Science, Harbin Institute of Technology, Shenzhen, China+Peng Cheng Laboratory, Shenzhen, China", + "aff_domain": "stu.hit.edu.cn;gmail.com", + "email": "stu.hit.edu.cn;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;0+1", + "aff_unique_norm": "Harbin Institute of Technology;Peng Cheng Laboratory", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "http://en.hhit.edu.cn/;", + "aff_unique_abbr": "HIT;", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Shenzhen", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.417", + "title": 
"Sharpness-Aware Minimization with Dynamic Reweighting", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Deep neural networks are often overparameterized and may not easily achieve model generalization. Adversarial training has shown effectiveness in improving generalization by regularizing the change of loss on top of adversarially chosen perturbations. The recently proposed sharpness-aware minimization (SAM) algorithm conducts adversarial weight perturbation, encouraging the model to converge to a flat minima. SAM finds a common adversarial weight perturbation per-batch. Although per-instance adversarial weight perturbations are stronger adversaries and can potentially lead to better generalization performance, their computational cost is very high and thus it is impossible to use per-instance perturbations efficiently in SAM. In this paper, we tackle this efficiency bottleneck and propose sharpness-aware minimization with dynamic reweighting (delta-SAM). Our theoretical analysis motivates that it is possible to approach the stronger, per-instance adversarial weight perturbations using reweighted per-batch weight perturbations. delta-SAM dynamically reweights perturbation within each batch according to the theoretically principled weighting factors, serving as a good approximation to per-instance perturbation. 
Experiments on various natural language understanding tasks demonstrate the effectiveness of delta-SAM.", + "author": "Wenxuan Zhou; Fangyu Liu; Huan Zhang; Muhao Chen", + "authorids": "/w/wenxuan-zhou/; /f/fangyu-liu/; /h/huan-zhang/; /m/muhao-chen/", + "bibtex": "@inproceedings{zhou-etal-2022-sharpness,\n title = \"Sharpness-Aware Minimization with Dynamic Reweighting\",\n author = \"Zhou, Wenxuan and\n Liu, Fangyu and\n Zhang, Huan and\n Chen, Muhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.417/\",\n doi = \"10.18653/v1/2022.findings-emnlp.417\",\n pages = \"5686--5699\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.417.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.417/", + "pdf_size": 397822, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6524333310058332970&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Southern California; University of Cambridge; Carnegie Mellon University; University of Southern California", + "aff_domain": "usc.edu;cam.ac.uk;huan-zhang.com;usc.edu", + "email": "usc.edu;cam.ac.uk;huan-zhang.com;usc.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "University of Southern California;University of Cambridge;Carnegie Mellon University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.usc.edu;https://www.cam.ac.uk;https://www.cmu.edu", + "aff_unique_abbr": "USC;Cambridge;CMU", + "aff_campus_unique_index": "0;1;0", + "aff_campus_unique": "Los Angeles;Cambridge;", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "United States;United Kingdom" + 
}, + { + "id": "2022.emnlp-main.351", + "title": "Should We Ban English NLP for a Year?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Around two thirds of NLP research at top venues is devoted exclusively to developing technology for speakers of English, most speech data comes from young urban speakers, and most texts used to train language models come from male writers. These biases feed into consumer technologies to widen existing inequality gaps, not only within, but also across, societies. Many have argued that it is almost impossible to mitigate inequality amplification. I argue that, on the contrary, it is quite simple to do so, and that counter-measures would have little-to-no negative impact, except for, perhaps, in the very short term.", + "author": "Anders S\u00f8gaard", + "authorids": "/a/anders-sogaard/", + "bibtex": "@inproceedings{sogaard-2022-ban,\n title = \"Should We Ban {E}nglish {NLP} for a Year?\",\n author = \"S{\\o}gaard, Anders\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.351/\",\n doi = \"10.18653/v1/2022.emnlp-main.351\",\n pages = \"5254--5260\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.351.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.351/", + "pdf_size": 223311, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15619255703728785151&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Dpt. of Computer Science, Pioneer Centre for Artificial Intelligence, and Dpt. 
of Philosophy, University of Copenhagen", + "aff_domain": "di.ku.dk", + "email": "di.ku.dk", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "University of Copenhagen", + "aff_unique_dep": "Dpt. of Computer Science", + "aff_unique_url": "https://www.ku.dk", + "aff_unique_abbr": "UCPH", + "aff_country_unique_index": "0", + "aff_country_unique": "Denmark" + }, + { + "id": "2022.emnlp-industry.56", + "title": "SimANS: Simple Ambiguous Negatives Sampling for Dense Text Retrieval", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Sampling proper negatives from a large document pool is vital to effectively train a dense retrieval model. However, existing negative sampling strategies suffer from the uninformative or false negative problem. In this work, we empirically show that according to the measured relevance scores, the negatives ranked around the positives are generally more informative and less likely to be false negatives. Intuitively, these negatives are not too hard (may be false negatives) or too easy (uninformative). 
They are the ambiguous negatives and need more attention during training.Thus, we propose a simple ambiguous negatives sampling method, SimANS, which incorporates a new sampling probability distribution to sample more ambiguous negatives.Extensive experiments on four public and one industry datasets show the effectiveness of our approach.We made the code and models publicly available in https://github.com/microsoft/SimXNS.", + "author": "Kun Zhou; Yeyun Gong; Xiao Liu; Wayne Xin Zhao; Yelong Shen; Anlei Dong; Jingwen Lu; Rangan Majumder; Ji-rong Wen; Nan Duan", + "authorids": "/k/kun-zhou/; /y/yeyun-gong/; /x/xiao-liu/; /w/wayne-xin-zhao/; /y/yelong-shen/; /a/anlei-dong/; /j/jingwen-lu/; /r/rangan-majumder/; /j/ji-rong-wen/; /n/nan-duan/", + "bibtex": "@inproceedings{zhou-etal-2022-simans,\n title = \"{S}im{ANS}: Simple Ambiguous Negatives Sampling for Dense Text Retrieval\",\n author = \"Zhou, Kun and\n Gong, Yeyun and\n Liu, Xiao and\n Zhao, Wayne Xin and\n Shen, Yelong and\n Dong, Anlei and\n Lu, Jingwen and\n Majumder, Rangan and\n Wen, Ji-rong and\n Duan, Nan\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.56/\",\n doi = \"10.18653/v1/2022.emnlp-industry.56\",\n pages = \"548--559\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.56.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.56/", + "pdf_size": 5685017, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2606021611371563634&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "https://github.com/microsoft/SimXNS", + "project": "", + "author_num": 10 + 
}, + { + "id": "2022.emnlp-main.378", + "title": "SimQA: Detecting Simultaneous MT Errors through Word-by-Word Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Detractors of neural machine translation admit that while its translations are fluent, it sometimes gets key facts wrong. This is particularly important in simultaneous interpretation where translations have to be provided as fast as possible: before a sentence is complete. Yet, evaluations of simultaneous machine translation (SimulMT) fail to capture if systems correctly translate the most salient elements of a question: people, places, and dates. To address this problem, we introduce a downstream word-by-word question answering evaluation task (SimQA): given a source language question, translate the question word by word into the target language, and answer as soon as possible. SimQA jointly measures whether the SimulMT models translate the question quickly and accurately, and can reveal shortcomings in existing neural systems\u2014hallucinating or omitting facts.", + "author": "HyoJung Han; Marine Carpuat; Jordan Boyd-Graber", + "authorids": "/h/hyojung-han/; /m/marine-carpuat/; /j/jordan-boyd-graber/", + "bibtex": "@inproceedings{han-etal-2022-simqa,\n title = \"{S}im{QA}: Detecting Simultaneous {MT} Errors through Word-by-Word Question Answering\",\n author = \"Han, HyoJung and\n Carpuat, Marine and\n Boyd-Graber, Jordan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.378/\",\n doi = \"10.18653/v1/2022.emnlp-main.378\",\n pages = \"5598--5616\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.378.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.378/", + "pdf_size": 662235, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6030355868181035271&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Computer Science, University of Maryland; Computer Science, University of Maryland; CS, UMIACS, iSchool, LCS, University of Maryland", + "aff_domain": "cs.umd.edu;cs.umd.edu;umiacs.umd.edu", + "email": "cs.umd.edu;cs.umd.edu;umiacs.umd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Maryland", + "aff_unique_dep": "Computer Science", + "aff_unique_url": "https://www/umd.edu", + "aff_unique_abbr": "UMD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.417", + "title": "Simple Questions Generate Named Entity Recognition Datasets", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent named entity recognition (NER) models often rely on human-annotated datasets requiring the vast engagement of professional knowledge on the target domain and entities. This work introduces an ask-to-generate approach, which automatically generates NER datasets by asking simple natural language questions to an open-domain question answering system (e.g., \u201cWhich disease?\u201d). Despite using fewer training resources, our models solely trained on the generated datasets largely outperform strong low-resource models by 19.5 F1 score across six popular NER benchmarks. Our models also show competitive performance with rich-resource models that additionally leverage in-domain dictionaries provided by domain experts. 
In few-shot NER, we outperform the previous best model by 5.2 F1 score on three benchmarks and achieve new state-of-the-art performance.", + "author": "Hyunjae Kim; Jaehyo Yoo; Seunghyun Yoon; Jinhyuk Lee; Jaewoo Kang", + "authorids": "/h/hyunjae-kim/; /j/jaehyo-yoo/; /s/seunghyun-yoon/; /j/jinhyuk-lee/; /j/jaewoo-kang/", + "bibtex": "@inproceedings{kim-etal-2022-simple,\n title = \"Simple Questions Generate Named Entity Recognition Datasets\",\n author = \"Kim, Hyunjae and\n Yoo, Jaehyo and\n Yoon, Seunghyun and\n Lee, Jinhyuk and\n Kang, Jaewoo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.417/\",\n doi = \"10.18653/v1/2022.emnlp-main.417\",\n pages = \"6220--6236\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.417.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.417/", + "pdf_size": 2126011, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6640892388876363977&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Korea University; Korea University; Adobe Research; Korea University + AIGEN Sciences; Korea University + AIGEN Sciences", + "aff_domain": "korea.ac.kr;korea.ac.kr;adobe.com;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;adobe.com;korea.ac.kr;korea.ac.kr", + "github": "https://github.com/dmis-lab/GeNER", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0+2;0+2", + "aff_unique_norm": "Korea University;Adobe;AIGEN Sciences", + "aff_unique_dep": ";Adobe Research;", + "aff_unique_url": "https://www.korea.ac.kr;https://research.adobe.com;", + "aff_unique_abbr": "KU;Adobe;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0;0;1;0+1;0+1", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "2022.findings-emnlp.252", + "title": "Simple but Challenging: Natural Language Inference Models Fail on Simple Sentences", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural language inference (NLI) is a task to infer the relationship between a premise and a hypothesis (e.g., entailment, neutral, or contradiction), and transformer-based models perform well on current NLI datasets such as MNLI and SNLI. Nevertheless, given the linguistic complexity of the large-scale datasets, it remains controversial whether these models can truly infer the relationship between sentences or they simply guess the answer via shallow heuristics. Here, we introduce a controlled evaluation set called Simple Pair to test the basic sentence inference ability of NLI models using sentences with syntactically simple structures. Three popular transformer-based models, i.e., BERT, RoBERTa, and DeBERTa, are employed. We find that these models fine-tuned on MNLI or SNLI perform very poorly on Simple Pair (< 35.4% accuracy). Further analyses reveal event coreference and compositional binding problems in these models. To improve the model performance, we augment the training set, i.e., MNLI or SNLI, with a few examples constructed based on Simple Pair ( 1% of the size of the original SNLI/MNLI training sets). Models fine-tuned on the augmented training set maintain high performance on MNLI/SNLI and perform very well on Simple Pair (~100% accuracy). Furthermore, the positive performance of the augmented training models can transfer to more complex examples constructed based on sentences from MNLI and SNLI. 
Taken together, the current work shows that (1) models achieving high accuracy on mainstream large-scale datasets still lack the capacity to draw accurate inferences on simple sentences, and (2) augmenting mainstream datasets with a small number of target simple sentences can effectively improve model performance.", + "author": "Cheng Luo; Wei Liu; Jieyu Lin; Jiajie Zou; Ming Xiang; Nai Ding", + "authorids": "/c/cheng-luo/; /w/wei-liu/; /j/jieyu-lin/; /j/jiajie-zou/; /m/ming-xiang/; /n/nai-ding/", + "bibtex": "@inproceedings{luo-etal-2022-simple-challenging,\n title = \"Simple but Challenging: Natural Language Inference Models Fail on Simple Sentences\",\n author = \"Luo, Cheng and\n Liu, Wei and\n Lin, Jieyu and\n Zou, Jiajie and\n Xiang, Ming and\n Ding, Nai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.252/\",\n doi = \"10.18653/v1/2022.findings-emnlp.252\",\n pages = \"3449--3462\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.252.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.252/", + "pdf_size": 1438744, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1847208354840187953&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Zhejiang Lab / Hangzhou, China; Zhejiang University / Hangzhou, China; Zhejiang University / Hangzhou, China; Zhejiang University / Hangzhou, China; The University of Chicago / Chicago, United States; Zhejiang Lab / Hangzhou, China + Zhejiang University / Hangzhou, China", + "aff_domain": "zhejianglab.com;zju.edu.cn;zju.edu.cn;zju.edu.cn;uchicago.edu;zju.edu.cn", + "email": "zhejianglab.com;zju.edu.cn;zju.edu.cn;zju.edu.cn;uchicago.edu;zju.edu.cn", + 
"github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;2;0+1", + "aff_unique_norm": "Zhejiang Lab;Zhejiang University;The University of Chicago", + "aff_unique_dep": ";;", + "aff_unique_url": ";http://www.zju.edu.cn;https://www.uchicago.edu", + "aff_unique_abbr": ";ZJU;UChicago", + "aff_campus_unique_index": "0;0;0;0;1;0+0", + "aff_campus_unique": "Hangzhou;Chicago", + "aff_country_unique_index": "0;0;0;0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.735", + "title": "Simplified Graph Learning for Inductive Short Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Short text classification (STC) is hard as short texts lack context information and labeled data is not enough. Graph neural networks obtain the state-of-the-art on STC since they can merge various auxiliary information via the message passing framework. However, existing works conduct transductive learning, which requires retraining to accommodate new samples and takes large memory. In this paper, we present SimpleSTC which handles inductive STC problem but only leverages words. We construct word graph from an external large corpus to compensate for the lack of semantic information, and learn text graph to handle the lack of labeled data. 
Results show that SimpleSTC obtains state-of-the-art performance with lower memory consumption and faster inference speed.", + "author": "Kaixin Zheng; Yaqing Wang; Quanming Yao; Dejing Dou", + "authorids": "/k/kaixin-zheng/; /y/yaqing-wang/; /q/quanming-yao/; /d/dejing-dou/", + "bibtex": "@inproceedings{zheng-etal-2022-simplified,\n title = \"Simplified Graph Learning for Inductive Short Text Classification\",\n author = \"Zheng, Kaixin and\n Wang, Yaqing and\n Yao, Quanming and\n Dou, Dejing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.735/\",\n doi = \"10.18653/v1/2022.emnlp-main.735\",\n pages = \"10717--10724\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.735.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.735/", + "pdf_size": 4071690, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=715218909930326249&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 0, + "aff": "Baidu Research, Baidu Inc., China+Department of SIOE, Beihang University, China; Baidu Research, Baidu Inc., China; Department of EE, Tsinghua University, China; Baidu Research, Baidu Inc., China", + "aff_domain": "buaa.edu.cn;baidu.com;tsinghua.edu.cn;baidu.com", + "email": "buaa.edu.cn;baidu.com;tsinghua.edu.cn;baidu.com", + "github": "https://github.com/tata1661/SimpleSTC-EMNLP22", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;2;0", + "aff_unique_norm": "Baidu Inc.;Beihang University;Tsinghua University", + "aff_unique_dep": "Baidu Research;Department of SIOE;Department of EE", + "aff_unique_url": "https://www.baidu.com;http://www.buaa.edu.cn/;https://www.tsinghua.edu.cn", + "aff_unique_abbr": 
"Baidu;;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.530", + "title": "SlovakBERT: Slovak Masked Language Model", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We introduce a new Slovak masked language model called SlovakBERT. This is to our best knowledge the first paper discussing Slovak transformers-based language models. We evaluate our model on several NLP tasks and achieve state-of-the-art results. This evaluation is likewise the first attempt to establish a benchmark for Slovak language models. We publish the masked language model, as well as the fine-tuned models for part-of-speech tagging, sentiment analysis and semantic textual similarity.", + "author": "Mat\u00fa\u0161 Pikuliak; \u0160tefan Grivalsk\u00fd; Martin Kon\u00f4pka; Miroslav Bl\u0161t\u00e1k; Martin Tamajka; Viktor Bachrat\u00fd; Marian Simko; Pavol Bal\u00e1\u017eik; Michal Trnka; Filip Uhl\u00e1rik", + "authorids": "/m/matus-pikuliak/; /s/stefan-grivalsky/; /m/martin-konopka/; /m/miroslav-blstak/; /m/martin-tamajka/; /v/viktor-bachraty/; /m/marian-simko/; /p/pavol-balazik/; /m/michal-trnka/; /f/filip-uhlarik/", + "bibtex": "@inproceedings{pikuliak-etal-2022-slovakbert,\n title = \"{S}lovak{BERT}: {S}lovak Masked Language Model\",\n author = \"Pikuliak, Mat{\\'u}{\\v{s}} and\n Grivalsk{\\'y}, {\\v{S}}tefan and\n Kon{\\^o}pka, Martin and\n Bl{\\v{s}}t{\\'a}k, Miroslav and\n Tamajka, Martin and\n Bachrat{\\'y}, Viktor and\n Simko, Marian and\n Bal{\\'a}{\\v{z}}ik, Pavol and\n Trnka, Michal and\n Uhl{\\'a}rik, Filip\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.findings-emnlp.530/\",\n doi = \"10.18653/v1/2022.findings-emnlp.530\",\n pages = \"7156--7168\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.530.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.530/", + "pdf_size": 291537, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12749945219208931789&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Kempelen Institute of Intelligent Technologies; Gerulata Technologies; Gerulata Technologies; Gerulata Technologies", + "aff_domain": "kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;gerulata.com;gerulata.com;gerulata.com", + "email": "kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;kinit.sk;gerulata.com;gerulata.com;gerulata.com", + "github": "https://github.com/gerulata/slovakbert", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;1;1;1", + "aff_unique_norm": "Kempelen Institute of Intelligent Technologies;Gerulata Technologies", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.kempeleninstitute.com;", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "Hungary;" + }, + { + "id": "2022.findings-emnlp.163", + "title": "Snapshot-Guided Domain Adaptation for ELECTRA", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Discriminative pre-trained language models, such as ELECTRA, have achieved promising performances in a variety of general tasks. However, these generic pre-trained models struggle to capture domain-specific knowledge of domain-related tasks. 
In this work, we propose a novel domain-adaptation method for ELECTRA, which can dynamically select domain-specific tokens and guide the discriminator to emphasize them, without introducing new training parameters. We show that by re-weighting the losses of domain-specific tokens, ELECTRA can be effectively adapted to different domains. The experimental results in both computer science and biomedical domains show that the proposed method can achieve state-of-the-art results on the domain-related tasks.", + "author": "Daixuan Cheng; Shaohan Huang; Jianfeng Liu; Yuefeng Zhan; Hao Sun; Furu Wei; Denvy Deng; Qi Zhang", + "authorids": "/d/daixuan-cheng/; /s/shaohan-huang/; /j/jianfeng-liu/; /y/yuefeng-zhan/; /h/hao-sun/; /f/furu-wei/; /d/denvy-deng/; /q/qi-zhang/", + "bibtex": "@inproceedings{cheng-etal-2022-snapshot,\n title = \"Snapshot-Guided Domain Adaptation for {ELECTRA}\",\n author = \"Cheng, Daixuan and\n Huang, Shaohan and\n Liu, Jianfeng and\n Zhan, Yuefeng and\n Sun, Hao and\n Wei, Furu and\n Deng, Denvy and\n Zhang, Qi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.163/\",\n doi = \"10.18653/v1/2022.findings-emnlp.163\",\n pages = \"2226--2232\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.163.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.163/", + "pdf_size": 477593, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8342974410891594190&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation", 
+ "aff_domain": "gmail.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "gmail.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "Microsoft Corporation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.159", + "title": "Social-aware Sparse Attention Network for Session-based Social Recommendation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Session-based Social Recommendation (SSR) aims to use users\u2019 social networks and historical sessions to provide more personalized recommendations for the current session.Unfortunately, existing SSR methods have two limitations.First, they do not screen users\u2019 useless social relationships and noisy irrelevant interactions.However, user preferences are mainly affected by several close friends and key interactions.Second, when modeling the current session, they do not take full advantage of user preference information.To tackle these issues, we propose a novel Social-aware Sparse Attention Network for SSR, abbreviated as SSAN.It mainly consists of the Heterogeneous Graph Embedding (HGE) module and the Social-aware Encoder-decoder Network (SEN) module.In the HGE module, we adopt a modified heterogeneous graph neural network, which focuses more on close friends and key historical interactions, to enhance user/item representations. 
In the SEN module, we use the user representation as a bridge between the Encoder and Decoder to incorporate user preferences when modeling the current session.Extensive experiments on two benchmark datasets demonstrate the superiority of SSAN over the state-of-the-art models.", + "author": "Kai Ouyang; Xianghong Xu; Chen Tang; Wang Chen; Haitao Zheng", + "authorids": "/k/kai-ouyang/; /x/xianghong-xu/; /c/chen-tang/; /w/wang-chen/; /h/haitao-zheng/", + "bibtex": "@inproceedings{ouyang-etal-2022-social,\n title = \"Social-aware Sparse Attention Network for Session-based Social Recommendation\",\n author = \"Ouyang, Kai and\n Xu, Xianghong and\n Tang, Chen and\n Chen, Wang and\n Zheng, Haitao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.159/\",\n doi = \"10.18653/v1/2022.findings-emnlp.159\",\n pages = \"2173--2183\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.159.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.159/", + "pdf_size": 351690, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3420833043382014369&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University; Shenzhen International Graduate School, Tsinghua University; Google Inc.; Shenzhen International Graduate School, Tsinghua University + Pengcheng Laboratory, Shenzhen, China, 518055", + "aff_domain": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;google.com;sz.tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;mails.tsinghua.edu.cn;google.com;sz.tsinghua.edu.cn", + 
"github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0+2", + "aff_unique_norm": "Tsinghua University;Google;Pengcheng Laboratory", + "aff_unique_dep": "International Graduate School;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.google.com;", + "aff_unique_abbr": "THU;Google;", + "aff_campus_unique_index": "0;0;0;1;0+0", + "aff_campus_unique": "Shenzhen;Mountain View", + "aff_country_unique_index": "0;0;0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.539", + "title": "SocioProbe: What, When, and Where Language Models Learn about Sociodemographics", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (PLMs) have outperformed other NLP models on a wide range of tasks. Opting for a more thorough understanding of their capabilities and inner workings, researchers have established the extend to which they capture lower-level knowledge like grammaticality, and mid-level semantic knowledge like factual understanding. However, there is still little understanding of their knowledge of higher-level aspects of language. In particular, despite the importance of sociodemographic aspects in shaping our language, the questions of whether, where, and how PLMs encode these aspects, e.g., gender or age, is still unexplored. We address this research gap by probing the sociodemographic knowledge of different single-GPU PLMs on multiple English data sets via traditional classifier probing and information-theoretic minimum description length probing. Our results show that PLMs do encode these sociodemographics, and that this knowledge is sometimes spread across the layers of some of the tested PLMs. We further conduct a multilingual analysis and investigate the effect of supplementary training to further explore to what extent, where, and with what amount of pre-training data the knowledge is encoded. 
Our overall results indicate that sociodemographic knowledge is still a major challenge for NLP. PLMs require large amounts of pre-training data to acquire the knowledge and models that excel in general language understanding do not seem to own more knowledge about these aspects.", + "author": "Anne Lauscher; Federico Bianchi; Samuel R. Bowman; Dirk Hovy", + "authorids": "/a/anne-lauscher/; /f/federico-bianchi/; /s/samuel-bowman/; /d/dirk-hovy/", + "bibtex": "@inproceedings{lauscher-etal-2022-socioprobe,\n title = \"{S}ocio{P}robe: What, When, and Where Language Models Learn about Sociodemographics\",\n author = \"Lauscher, Anne and\n Bianchi, Federico and\n Bowman, Samuel R. and\n Hovy, Dirk\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.539/\",\n doi = \"10.18653/v1/2022.emnlp-main.539\",\n pages = \"7901--7918\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.539.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.539/", + "pdf_size": 3924585, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6812970413070275198&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Data Science Group, University of Hamburg, Germany; StanfordNLP, Stanford University, CA, USA; New York University, NY, USA; MilaNLP, Bocconi University, Milan, Italy", + "aff_domain": "uni-hamburg.de;stanford.edu;nyu.edu;unibocconi.it", + "email": "uni-hamburg.de;stanford.edu;nyu.edu;unibocconi.it", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;3", + "aff_unique_norm": "University of Hamburg;Stanford University;New York University;Bocconi University", + "aff_unique_dep": "Data 
Science Group;StanfordNLP;;MilaNLP", + "aff_unique_url": "https://www.uni-hamburg.de;https://www.stanford.edu;https://www.nyu.edu;https://www.bocconi.edu", + "aff_unique_abbr": ";Stanford;NYU;", + "aff_campus_unique_index": "1;2;3", + "aff_campus_unique": ";CA;New York;Milan", + "aff_country_unique_index": "0;1;1;2", + "aff_country_unique": "Germany;United States;Italy" + }, + { + "id": "2022.findings-emnlp.9", + "title": "Soft-Labeled Contrastive Pre-Training for Function-Level Code Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Code contrastive pre-training has recently achieved significant progress on code-related tasks. In this paper, we present SCodeR, a Soft-labeled contrastive pre-training framework with two positive sample construction methods to learn functional-level Code Representation. Considering the relevance between codes in a large-scale code corpus, the soft-labeled contrastive pre-training can obtain fine-grained soft-labels through an iterative adversarial manner and use them to learn better code representation. The positive sample construction is another key for contrastive pre-training. Previous works use transformation-based methods like variable renaming to generate semantically equal positive codes. However, they usually result in the generated code with a highly similar surface form, and thus mislead the model to focus on superficial code structure instead of code semantics. To encourage SCodeR to capture semantic information from the code, we utilize code comments and abstract syntax sub-trees of the code to build positive samples. We conduct experiments on four code-related tasks over seven datasets. 
Extensive experimental results show that SCodeR achieves new state-of-the-art performance on all of them, which illustrates the effectiveness of the proposed pre-training method.", + "author": "Xiaonan Li; Daya Guo; Yeyun Gong; Yun Lin; Yelong Shen; Xipeng Qiu; Daxin Jiang; Weizhu Chen; Nan Duan", + "authorids": "/x/xiaonan-li/; /d/daya-guo/; /y/yeyun-gong/; /y/yun-lin/; /y/yelong-shen/; /x/xipeng-qiu/; /d/daxin-jiang/; /w/weizhu-chen/; /n/nan-duan/", + "bibtex": "@inproceedings{li-etal-2022-soft,\n title = \"Soft-Labeled Contrastive Pre-Training for Function-Level Code Representation\",\n author = \"Li, Xiaonan and\n Guo, Daya and\n Gong, Yeyun and\n Lin, Yun and\n Shen, Yelong and\n Qiu, Xipeng and\n Jiang, Daxin and\n Chen, Weizhu and\n Duan, Nan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.9/\",\n doi = \"10.18653/v1/2022.findings-emnlp.9\",\n pages = \"118--129\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.9.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.9/", + "pdf_size": 554248, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9617932298175136654&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff": "Shanghai Key Laboratory of Intelligent Information Processing, Fudan University+School of Computer Science, Fudan University; Microsoft; National University of Singapore; Microsoft; Microsoft; Shanghai Key Laboratory of Intelligent Information Processing, Fudan University+School of Computer Science, Fudan University; Microsoft; Microsoft; Microsoft", + "aff_domain": 
"fudan.edu.cn;microsoft.com;microsoft.com;nus.edu.sg;microsoft.com;fudan.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "email": "fudan.edu.cn;microsoft.com;microsoft.com;nus.edu.sg;microsoft.com;fudan.edu.cn;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0+0;1;2;1;1;0+0;1;1;1", + "aff_unique_norm": "Fudan University;Microsoft Corporation;National University of Singapore", + "aff_unique_dep": "Shanghai Key Laboratory of Intelligent Information Processing;;", + "aff_unique_url": "https://www.fudan.edu.cn;https://www.microsoft.com;https://www.nus.edu.sg", + "aff_unique_abbr": "Fudan;Microsoft;NUS", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0+0;1;2;1;1;0+0;1;1;1", + "aff_country_unique": "China;United States;Singapore" + }, + { + "id": "2022.findings-emnlp.200", + "title": "SpaBERT: A Pretrained Language Model from Geographic Data for Geo-Entity Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Named geographic entities (geo-entities for short) are the building blocks of many geographic datasets. Characterizing geo-entities is integral to various application domains, such as geo-intelligence and map comprehension, while a key challenge is to capture the spatial-varying context of an entity. We hypothesize that we shall know the characteristics of a geo-entity by its surrounding entities, similar to knowing word meanings by their linguistic context. Accordingly, we propose a novel spatial language model, SpaBERT, which provides a general-purpose geo-entity representation based on neighboring entities in geospatial data. SpaBERT extends BERT to capture linearized spatial context, while incorporating a spatial coordinate embedding mechanism to preserve spatial relations of entities in the 2-dimensional space. 
SpaBERT is pretrained with masked language modeling and masked entity prediction tasks to learn spatial dependencies. We apply SpaBERT to two downstream tasks: geo-entity typing and geo-entity linking. Compared with the existing language models that do not use spatial context, SpaBERT shows significant performance improvement on both tasks. We also analyze the entity representation from SpaBERT in various settings and the effect of spatial coordinate embedding.", + "author": "Zekun Li; Jina Kim; Yao-Yi Chiang; Muhao Chen", + "authorids": "/z/zekun-li/; /j/jina-kim/; /y/yao-yi-chiang/; /m/muhao-chen/", + "bibtex": "@inproceedings{li-etal-2022-spabert,\n title = \"{S}pa{BERT}: A Pretrained Language Model from Geographic Data for Geo-Entity Representation\",\n author = \"Li, Zekun and\n Kim, Jina and\n Chiang, Yao-Yi and\n Chen, Muhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.200/\",\n doi = \"10.18653/v1/2022.findings-emnlp.200\",\n pages = \"2757--2769\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.200.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.200/", + "pdf_size": 3191985, + "gs_citation": 35, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13953458309516499457&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science and Engineering, University of Minnesota, Twin Cities; Department of Computer Science and Engineering, University of Minnesota, Twin Cities; Department of Computer Science and Engineering, University of Minnesota, Twin Cities; Department of Computer Science, University of Southern California", + "aff_domain": "umn.edu;umn.edu;umn.edu;usc.edu", + 
"email": "umn.edu;umn.edu;umn.edu;usc.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "University of Minnesota;University of Southern California", + "aff_unique_dep": "Department of Computer Science and Engineering;Department of Computer Science", + "aff_unique_url": "https://www.umn.edu;https://www.usc.edu", + "aff_unique_abbr": "UMN;USC", + "aff_campus_unique_index": "0;0;0;1", + "aff_campus_unique": "Twin Cities;Los Angeles", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.227", + "title": "SpanProto: A Two-stage Span-based Prototypical Network for Few-shot Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Few-shot Named Entity Recognition (NER) aims to identify named entities with very little annotated data. Previous methods solve this problem based on token-wise classification, which ignores the information of entity boundaries, and inevitably the performance is affected by the massive non-entity tokens. To this end, we propose a seminal span-based prototypical network (SpanProto) that tackles few-shot NER via a two-stage approach, including span extraction and mention classification. In the span extraction stage, we transform the sequential tags into a global boundary matrix, enabling the model to focus on the explicit boundary information. For mention classification, we leverage prototypical learning to capture the semantic representations for each labeled span and make the model better adapt to novel-class entities. To further improve the model performance, we split out the false positives generated by the span extractor but not labeled in the current episode set, and then present a margin-based loss to separate them from each prototype region. 
Experiments over multiple benchmarks demonstrate that our model outperforms strong baselines by a large margin.", + "author": "Jianing Wang; Chengyu Wang; Chuanqi Tan; Minghui Qiu; Songfang Huang; Jun Huang; Ming Gao", + "authorids": "/j/jianing-wang/; /c/chengyu-wang/; /c/chuanqi-tan/; /m/minghui-qiu/; /s/songfang-huang/; /j/jun-huang/; /m/ming-gao/", + "bibtex": "@inproceedings{wang-etal-2022-spanproto,\n title = \"{S}pan{P}roto: A Two-stage Span-based Prototypical Network for Few-shot Named Entity Recognition\",\n author = \"Wang, Jianing and\n Wang, Chengyu and\n Tan, Chuanqi and\n Qiu, Minghui and\n Huang, Songfang and\n Huang, Jun and\n Gao, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.227/\",\n doi = \"10.18653/v1/2022.emnlp-main.227\",\n pages = \"3466--3476\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.227.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.227/", + "pdf_size": 1145427, + "gs_citation": 40, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9401929316860520265&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China+KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; School of Data Science and Engineering, East China Normal University, Shanghai, China+KLATASDS-MOE, School of Statistics, East China Normal University, Shanghai, China", + "aff_domain": 
"gmail.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;dase.ecnu.edu.cn", + "email": "gmail.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;dase.ecnu.edu.cn", + "github": "https://github.com/alibaba/EasyNLP", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;1;1;1;1;1;0+0", + "aff_unique_norm": "East China Normal University;Alibaba Group", + "aff_unique_dep": "School of Data Science and Engineering;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "ECNU;Alibaba", + "aff_campus_unique_index": "0+0;1;1;1;1;1;0+0", + "aff_campus_unique": "Shanghai;Hangzhou", + "aff_country_unique_index": "0+0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.5", + "title": "Sparse Mixers: Combining MoE and Mixing to build a more efficient BERT", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We combine the capacity of sparsely gated Mixture-of-Experts (MoE) with the speed and stability of linear, mixing transformations to design the Sparse Mixer encoder model. Sparse Mixer slightly outperforms BERT on GLUE and SuperGLUE, but more importantly trains 65% faster and runs inference 61% faster. We also present a faster variant, prosaically named Fast Sparse Mixer, that marginally underperforms BERT on SuperGLUE, but trains and runs nearly twice as fast. We justify the design of these two models by carefully ablating through various mixing mechanisms, MoE configurations, and hyperparameters. 
Sparse Mixer overcomes many of the latency and stability concerns of MoE models and offers the prospect of serving sparse student models, without resorting to distilling them to dense variants.", + "author": "James Lee-Thorp; Joshua Ainslie", + "authorids": "/j/james-lee-thorp/; /j/joshua-ainslie/", + "bibtex": "@inproceedings{lee-thorp-ainslie-2022-sparse,\n title = \"Sparse Mixers: Combining {M}o{E} and Mixing to build a more efficient {BERT}\",\n author = \"Lee-Thorp, James and\n Ainslie, Joshua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.5/\",\n doi = \"10.18653/v1/2022.findings-emnlp.5\",\n pages = \"58--75\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.5.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.5/", + "pdf_size": 486774, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12994975619979578256&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Google Research; Google Research", + "aff_domain": "google.com;google.com", + "email": "google.com;google.com", + "github": "https://github.com/google-research/google-research/tree/master/sparse_mixers", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.258", + "title": "Sparse Teachers Can Be Dense with Knowledge", + "track": "main", + "status": "Main", + "award": false, 
+ "abstract": "Recent advances in distilling pretrained language models have discovered that, besides the expressiveness of knowledge, the student-friendliness should be taken into consideration to realize a truly knowledgeable teacher. Based on a pilot study, we find that over-parameterized teachers can produce expressive yet student-unfriendly knowledge and are thus limited in overall knowledgeableness. To remove the parameters that result in student-unfriendliness, we propose a sparse teacher trick under the guidance of an overall knowledgeable score for each teacher parameter. The knowledgeable score is essentially an interpolation of the expressiveness and student-friendliness scores. The aim is to ensure that the expressive parameters are retained while the student-unfriendly ones are removed. Extensive experiments on the GLUE benchmark show that the proposed sparse teachers can be dense with knowledge and lead to students with compelling performance in comparison with a series of competitive baselines.", + "author": "Yi Yang; Chen Zhang; Dawei Song", + "authorids": "/y/yi-yang/; /c/chen-zhang/; /d/dawei-song/", + "bibtex": "@inproceedings{yang-etal-2022-sparse,\n title = \"Sparse Teachers Can Be Dense with Knowledge\",\n author = \"Yang, Yi and\n Zhang, Chen and\n Song, Dawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.258/\",\n doi = \"10.18653/v1/2022.emnlp-main.258\",\n pages = \"3904--3915\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.258.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.258/", + "pdf_size": 1046306, + "gs_citation": 7, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3584543463202726064&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 7, + "aff": "Beijing Institute of Technology; Beijing Institute of Technology; Beijing Institute of Technology + The Open University, UK", + "aff_domain": "bit.edu.cn;bit.edu.cn;bit.edu.cn", + "email": "bit.edu.cn;bit.edu.cn;bit.edu.cn", + "github": "https://github.com/GeneZC/StarK", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0+1", + "aff_unique_norm": "Beijing Institute of Technology;The Open University", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bit.edu.cn/;https://www.open.ac.uk", + "aff_unique_abbr": "BIT;OU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "2022.findings-emnlp.160", + "title": "SparseAdapter: An Easy Approach for Improving the Parameter-Efficiency of Adapters", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Adapter Tuning, which freezes the pretrained language models (PLMs) and only fine-tunes a few extra modules, becomes an appealing efficient alternative to the full model fine-tuning. Although computationally efficient, the recent Adapters often increase parameters (e.g. bottleneck dimension) for matching the performance of full model fine-tuning, which we argue goes against their original intention. In this work, we re-examine the parameter-efficiency of Adapter through the lens of network pruning (we name such plug-in concept as SparseAdapter) and find that SparseAdapter can achieve comparable or better performance than standard Adapters when the sparse ratio reaches up to 80%. Based on our findings, we introduce an easy but effective setting \u201cLarge-Sparse\u201d to improve the model capacity of Adapters under the same parameter budget. Experiments on five competitive Adapters upon three advanced PLMs show that with proper sparse method (e.g. 
SNIP) and ratio (e.g. 40%) SparseAdapter can consistently outperform their corresponding counterpart. Encouragingly, with the Large-Sparse setting, we can obtain further appealing gains, even outperforming the full fine-tuning by a large margin.", + "author": "Shwai He; Liang Ding; Daize Dong; Jeremy Zhang; Dacheng Tao", + "authorids": "/s/shwai-he/; /l/liang-ding/; /d/daize-dong/; /j/jeremy-zhang/; /d/dacheng-tao/", + "bibtex": "@inproceedings{he-etal-2022-sparseadapter,\n title = \"{S}parse{A}dapter: An Easy Approach for Improving the Parameter-Efficiency of Adapters\",\n author = \"He, Shwai and\n Ding, Liang and\n Dong, Daize and\n Zhang, Jeremy and\n Tao, Dacheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.160/\",\n doi = \"10.18653/v1/2022.findings-emnlp.160\",\n pages = \"2184--2190\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.160.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.160/", + "pdf_size": 335860, + "gs_citation": 87, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=319993370871513516&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "JD Explore Academy+University of Electronic Science and Technology of China; JD Explore Academy; University of Electronic Science and Technology of China; Aalborg University; JD Explore Academy+The university of Sydney", + "aff_domain": "gmail.com;jd.com;gmail.com;cs.aau.dk;gmail.com", + "email": "gmail.com;jd.com;gmail.com;cs.aau.dk;gmail.com", + "github": "https://github.com/Shwai-He/SparseAdapter", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;1;2;0+3", + "aff_unique_norm": "JD Explore Academy;University of Electronic 
Science and Technology of China;Aalborg University;University of Sydney", + "aff_unique_dep": ";;;", + "aff_unique_url": ";https://www.uestc.edu.cn;https://www.aau.dk;https://www.sydney.edu.au", + "aff_unique_abbr": ";UESTC;AAU;USYD", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;2;3", + "aff_country_unique": ";China;Denmark;Australia" + }, + { + "id": "2022.emnlp-main.505", + "title": "Speaker Overlap-aware Neural Diarization for Multi-party Meeting Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, hybrid systems of clustering and neural diarization models have been successfully applied in multi-party meeting analysis. However, current models always treat overlapped speaker diarization as a multi-label classification problem, where speaker dependency and overlaps are not well considered. To overcome the disadvantages, we reformulate overlapped speaker diarization task as a single-label prediction problem via the proposed power set encoding (PSE). Through this formulation, speaker dependency and overlaps can be explicitly modeled. To fully leverage this formulation, we further propose the speaker overlap-aware neural diarization (SOND) model, which consists of a context-independent (CI) scorer to model global speaker discriminability, a context-dependent scorer (CD) to model local discriminability, and a speaker combining network (SCN) to combine and reassign speaker activities. 
Experimental results show that using the proposed formulation can outperform the state-of-the-art methods based on target speaker voice activity detection, and the performance can be further improved with SOND, resulting in a 6.30% relative diarization error reduction.", + "author": "Zhihao Du; ShiLiang Zhang; Siqi Zheng; Zhi-Jie Yan", + "authorids": "/z/zhihao-du/; /s/shiliang-zhang/; /s/siqi-zheng/; /z/zhi-jie-yan/", + "bibtex": "@inproceedings{du-etal-2022-speaker,\n title = \"Speaker Overlap-aware Neural Diarization for Multi-party Meeting Analysis\",\n author = \"Du, Zhihao and\n Zhang, ShiLiang and\n Zheng, Siqi and\n Yan, Zhi-Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.505/\",\n doi = \"10.18653/v1/2022.emnlp-main.505\",\n pages = \"7458--7469\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.505.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.505/", + "pdf_size": 590284, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2751478144666105370&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Speech Lab, Alibaba Group, China; Speech Lab, Alibaba Group, China; Speech Lab, Alibaba Group, China; Speech Lab, Alibaba Group, China", + "aff_domain": "alibaba-inc.com;alibaba-inc.com; ; ", + "email": "alibaba-inc.com;alibaba-inc.com; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "Speech Lab", + "aff_unique_url": "https://www.alibaba.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + 
"aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.680", + "title": "Specializing Multi-domain NMT via Penalizing Low Mutual Information", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-domain Neural Machine Translation (NMT) trains a single model with multiple domains. It is appealing because of its efficacy in handling multiple domains within one model. An ideal multi-domain NMT learns distinctive domain characteristics simultaneously, however, grasping the domain peculiarity is a non-trivial task. In this paper, we investigate domain-specific information through the lens of mutual information (MI) and propose a new objective that penalizes low MI to become higher.Our method achieved the state-of-the-art performance among the current competitive multi-domain NMT models. Also, we show our objective promotes low MI to be higher resulting in domain-specialized multi-domain NMT.", + "author": "Jiyoung Lee; Hantae Kim; Hyunchang Cho; Edward Choi; Cheonbok Park", + "authorids": "/j/jiyoung-lee/; /h/hantae-kim/; /h/hyunchang-cho/; /e/edward-choi/; /c/cheonbok-park/", + "bibtex": "@inproceedings{lee-etal-2022-specializing,\n title = \"Specializing Multi-domain {NMT} via Penalizing Low Mutual Information\",\n author = \"Lee, Jiyoung and\n Kim, Hantae and\n Cho, Hyunchang and\n Choi, Edward and\n Park, Cheonbok\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.680/\",\n doi = \"10.18653/v1/2022.emnlp-main.680\",\n pages = \"10015--10026\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.680.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.680/", + "pdf_size": 426303, + "gs_citation": 4, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=15739431197499223252&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.527", + "title": "Spectral Probing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Linguistic information is encoded at varying timescales (subwords, phrases, etc.) and communicative levels, such as syntax and semantics. Contextualized embeddings have analogously been found to capture these phenomena at distinctive layers and frequencies. Leveraging these findings, we develop a fully learnable frequency filter to identify spectral profiles for any given task. It enables vastly more granular analyses than prior handcrafted filters, and improves on efficiency. After demonstrating the informativeness of spectral probing over manual filters in a monolingual setting, we investigate its multilingual characteristics across seven diverse NLP tasks in six languages. 
Our analyses identify distinctive spectral profiles which quantify cross-task similarity in a linguistically intuitive manner, while remaining consistent across languages\u2014highlighting their potential as robust, lightweight task descriptors.", + "author": "Max M\u00fcller-Eberstein; Rob van der Goot; Barbara Plank", + "authorids": "/m/max-muller-eberstein/; /r/rob-van-der-goot/; /b/barbara-plank/", + "bibtex": "@inproceedings{muller-eberstein-etal-2022-spectral,\n title = \"Spectral Probing\",\n author = {M{\\\"u}ller-Eberstein, Max and\n van der Goot, Rob and\n Plank, Barbara},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.527/\",\n doi = \"10.18653/v1/2022.emnlp-main.527\",\n pages = \"7730--7741\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.527.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.527/", + "pdf_size": 809257, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4795982124677757783&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Department of Computer Science, IT University of Copenhagen, Denmark; Department of Computer Science, IT University of Copenhagen, Denmark; Center for Information and Language Processing (CIS), LMU Munich, Germany+Munich Center for Machine Learning (MCML), Munich, Germany", + "aff_domain": "itu.dk;itu.dk;lmu.de", + "email": "itu.dk;itu.dk;lmu.de", + "github": "https://github.com/mainlp/spectral-probing", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;1+2", + "aff_unique_norm": "IT University of Copenhagen;LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Department of Computer Science;Center for 
Information and Language Processing (CIS);", + "aff_unique_url": "https://itu.dk;https://www.lmu.de;", + "aff_unique_abbr": "ITU Copenhagen;LMU;MCML", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0;0;1+1", + "aff_country_unique": "Denmark;Germany" + }, + { + "id": "2022.emnlp-industry.29", + "title": "SpeechNet: Weakly Supervised, End-to-End Speech Recognition at Industrial Scale", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "End-to-end automatic speech recognition systems represent the state of the art, but they rely on thousands of hours of manually annotated speech for training, as well as heavyweight computation for inference. Of course, this impedes commercialization since most companies lack vast human and computational resources. In this paper, we explore training and deploying an ASR system in the label-scarce, compute-limited setting. To reduce human labor, we use a third-party ASR system as a weak supervision source, supplemented with labeling functions derived from implicit user feedback. To accelerate inference, we propose to route production-time queries across a pool of CUDA graphs of varying input lengths, the distribution of which best matches the traffic\u2019s. Compared to our third-party ASR, we achieve a relative improvement in word-error rate of 8% and a speedup of 600%. Our system, called SpeechNet, currently serves 12 million queries per day on our voice-enabled smart television. 
To our knowledge, this is the first time a large-scale, Wav2vec-based deployment has been described in the academic literature.", + "author": "Raphael Tang; Karun Kumar; Gefei Yang; Akshat Pandey; Yajie Mao; Vladislav Belyaev; Madhuri Emmadi; Craig Murray; Ferhan Ture; Jimmy Lin", + "authorids": "/r/raphael-tang/; /k/karun-kumar/; /g/gefei-yang/; /a/akshat-pandey/; /y/yajie-mao/; /v/vladislav-belyaev/; /m/madhuri-emmadi/; /c/craig-murray/; /f/ferhan-ture/; /j/jimmy-lin/", + "bibtex": "@inproceedings{tang-etal-2022-speechnet,\n title = \"{S}peech{N}et: Weakly Supervised, End-to-End Speech Recognition at Industrial Scale\",\n author = \"Tang, Raphael and\n Kumar, Karun and\n Yang, Gefei and\n Pandey, Akshat and\n Mao, Yajie and\n Belyaev, Vladislav and\n Emmadi, Madhuri and\n Murray, Craig and\n Ture, Ferhan and\n Lin, Jimmy\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.29/\",\n doi = \"10.18653/v1/2022.emnlp-industry.29\",\n pages = \"285--293\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.29.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.29/", + "pdf_size": 1142904, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17977119013497151849&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; Comcast Applied AI; University of Waterloo", + "aff_domain": "comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;uwaterloo.ca", + "email": 
"comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;comcast.com;uwaterloo.ca", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;1", + "aff_unique_norm": "Comcast;University of Waterloo", + "aff_unique_dep": "Applied AI;", + "aff_unique_url": "https://www.comcast.com;https://uwaterloo.ca", + "aff_unique_abbr": "Comcast;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;1", + "aff_country_unique": "United States;Canada" + }, + { + "id": "2022.emnlp-main.108", + "title": "SpeechUT: Bridging Speech and Text with Hidden-Unit for Encoder-Decoder Based Speech-Text Pre-training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The rapid development of single-modal pre-training has prompted researchers to pay more attention to cross-modal pre-training methods. In this paper, we propose a unified-modal speech-unit-text pre-training model, SpeechUT, to connect the representations of a speech encoder and a text decoder with a shared unit encoder. Leveraging hidden-unit as an interface to align speech and text, we can decompose the speech-to-text model into a speech-to-unit model and a unit-to-text model, which can be jointly pre-trained with unpaired speech and text data respectively. Our proposed SpeechUT is fine-tuned and evaluated on automatic speech recognition (ASR) and speech translation (ST) tasks. Experimental results show that SpeechUT gets substantial improvements over strong baselines, and achieves state-of-the-art performance on both the LibriSpeech ASR and MuST-C ST tasks. To better understand the proposed SpeechUT, detailed analyses are conducted. 
The code and pre-trained models are available at https://aka.ms/SpeechUT.", + "author": "Ziqiang Zhang; Long Zhou; Junyi Ao; Shujie Liu; Lirong Dai; Jinyu Li; Furu Wei", + "authorids": "/z/ziqiang-zhang/; /l/long-zhou/; /j/junyi-ao/; /s/shujie-liu/; /l/lirong-dai/; /j/jinyu-li/; /f/furu-wei/", + "bibtex": "@inproceedings{zhang-etal-2022-speechut,\n title = \"{S}peech{UT}: Bridging Speech and Text with Hidden-Unit for Encoder-Decoder Based Speech-Text Pre-training\",\n author = \"Zhang, Ziqiang and\n Zhou, Long and\n Ao, Junyi and\n Liu, Shujie and\n Dai, Lirong and\n Li, Jinyu and\n Wei, Furu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.108/\",\n doi = \"10.18653/v1/2022.emnlp-main.108\",\n pages = \"1663--1676\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.108.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.108/", + "pdf_size": 2167372, + "gs_citation": 58, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8963519551547997493&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "University of Science and Technology of China+Microsoft; Microsoft; The Chinese University of Hong Kong, Shenzhen+Microsoft; Microsoft; University of Science and Technology of China; Microsoft; Microsoft", + "aff_domain": "ustc.edu.cn;microsoft.com;cuhk.edu.cn;microsoft.com;ustc.edu.cn;microsoft.com;microsoft.com", + "email": "ustc.edu.cn;microsoft.com;cuhk.edu.cn;microsoft.com;ustc.edu.cn;microsoft.com;microsoft.com", + "github": "", + "project": "https://aka.ms/SpeechUT", + "author_num": 7, + "aff_unique_index": "0+1;1;2+1;1;0;1;1", + "aff_unique_norm": "University of Science and Technology of China;Microsoft 
Corporation;The Chinese University of Hong Kong", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.microsoft.com;https://www.cuhk.edu.cn", + "aff_unique_abbr": "USTC;Microsoft;CUHK", + "aff_campus_unique_index": ";1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0+1;1;0+1;1;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.138", + "title": "Stanceosaurus: Classifying Stance Towards Multicultural Misinformation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present Stanceosaurus, a new corpus of 28,033 tweets in English, Hindi and Arabic annotated with stance towards 250 misinformation claims. As far as we are aware, it is the largest corpus annotated with stance towards misinformation claims. The claims in Stanceosaurus originate from 15 fact-checking sources that cover diverse geographical regions and cultures. Unlike existing stance datasets, we introduce a more fine-grained 5-class labeling strategy with additional subcategories to distinguish implicit stance. Pre-trained transformer-based stance classifiers that are fine-tuned on our corpus show good generalization on unseen claims and regional claims from countries outside the training data. Cross-lingual experiments demonstrate Stanceosaurus\u2019 capability of training multilingual models, achieving 53.1 F1 on Hindi and 50.4 F1 on Arabic without any target-language fine-tuning. Finally, we show how a domain adaptation method can be used to improve performance on Stanceosaurus using additional RumourEval-2019 data. 
We will make Stanceosaurus publicly available to the research community upon publication and hope it will encourage further work on misinformation identification across languages and cultures.", + "author": "Jonathan Zheng; Ashutosh Baheti; Tarek Naous; Wei Xu; Alan Ritter", + "authorids": "/j/jonathan-zheng/; /a/ashutosh-baheti/; /t/tarek-naous/; /w/wei-xu/; /a/alan-ritter/", + "bibtex": "@inproceedings{zheng-etal-2022-stanceosaurus,\n title = \"Stanceosaurus: Classifying Stance Towards Multicultural Misinformation\",\n author = \"Zheng, Jonathan and\n Baheti, Ashutosh and\n Naous, Tarek and\n Xu, Wei and\n Ritter, Alan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.138/\",\n doi = \"10.18653/v1/2022.emnlp-main.138\",\n pages = \"2132--2151\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.138.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.138/", + "pdf_size": 2884643, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9440536661353503100&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 5, + "aff": "School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology", + "aff_domain": "gatech.edu;gatech.edu;gatech.edu;cc.gatech.edu;cc.gatech.edu", + "email": "gatech.edu;gatech.edu;gatech.edu;cc.gatech.edu;cc.gatech.edu", + "github": "", + "project": "https://tinyurl.com/stanceosaurus", + "author_num": 5, + "aff_unique_index": 
"0;0;0;0;0", + "aff_unique_norm": "Georgia Institute of Technology", + "aff_unique_dep": "School of Interactive Computing", + "aff_unique_url": "https://www.gatech.edu", + "aff_unique_abbr": "Georgia Tech", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Atlanta", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.474", + "title": "Status Biases in Deliberation Online: Evidence from a Randomized Experiment on ChangeMyView", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Status is widely used to incentivize user engagement online. However, visible status indicators could inadvertently bias online deliberation to favor high-status users. In this work, we design and deploy a randomized experiment on the ChangeMyView platform to quantify status biases in deliberation online. We find strong evidence of status bias: hiding status on ChangeMyView increases the persuasion rate of moderate-status users by 84% and decreases the persuasion rate of high-status users by 41% relative to the control group. We also find that the persuasive power of status is moderated by verbosity, suggesting that status is used as an information-processing heuristic under cognitive load. 
Finally, we find that a user\u2019s status influences the argumentation behavior of other users they interact with in a manner that disadvantages low and moderate-status users.", + "author": "Emaad Manzoor; Yohan Jo; Alan Montgomery", + "authorids": "/e/emaad-manzoor/; /y/yohan-jo/; /a/alan-montgomery/", + "bibtex": "@inproceedings{manzoor-etal-2022-status,\n title = \"Status Biases in Deliberation Online: Evidence from a Randomized Experiment on {C}hange{M}y{V}iew\",\n author = \"Manzoor, Emaad and\n Jo, Yohan and\n Montgomery, Alan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.474/\",\n doi = \"10.18653/v1/2022.findings-emnlp.474\",\n pages = \"6351--6363\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.474.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.474/", + "pdf_size": 1344314, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11644189880811432705&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "University of Wisconsin Madison; Amazon; Carnegie Mellon University", + "aff_domain": "wisc.edu;amazon.com;cmu.edu", + "email": "wisc.edu;amazon.com;cmu.edu", + "github": "https://github.com/emaadmanzoor/2022-emnlp-status_biases", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "University of Wisconsin-Madison;Amazon.com, Inc.;Carnegie Mellon University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.wisc.edu;https://www.amazon.com;https://www.cmu.edu", + "aff_unique_abbr": "UW-Madison;Amazon;CMU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Madison;", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.124", + "title": "Stop Measuring Calibration When Humans Disagree", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Calibration is a popular framework to evaluate whether a classifier knows when it does not know - i.e., its predictive probabilities are a good indication of how likely a prediction is to be correct. Correctness is commonly estimated against the human majority class. Recently, calibration to human majority has been measured on tasks where humans inherently disagree about which class applies. We show that measuring calibration to human majority given inherent disagreements is theoretically problematic, demonstrate this empirically on the ChaosNLI dataset, and derive several instance-level measures of calibration that capture key statistical properties of human judgements - including class frequency, ranking and entropy.", + "author": "Joris Baan; Wilker Aziz; Barbara Plank; Raquel Fernandez", + "authorids": "/j/joris-baan/; /w/wilker-aziz/; /b/barbara-plank/; /r/raquel-fernandez/", + "bibtex": "@inproceedings{baan-etal-2022-stop,\n title = \"Stop Measuring Calibration When Humans Disagree\",\n author = \"Baan, Joris and\n Aziz, Wilker and\n Plank, Barbara and\n Fernandez, Raquel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.124/\",\n doi = \"10.18653/v1/2022.emnlp-main.124\",\n pages = \"1892--1915\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.124.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.124/", + "pdf_size": 1764332, + "gs_citation": 56, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=17422860126757094740&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 5, + "aff": "University of Amsterdam; University of Amsterdam; IT University of Copenhagen+MCML Munich+LMU Munich; University of Amsterdam", + "aff_domain": "uva.nl;uva.nl;lmu.de;uva.nl", + "email": "uva.nl;uva.nl;lmu.de;uva.nl", + "github": "https://github.com/jsbaan/calibration-on-disagreement-data", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1+2+3;0", + "aff_unique_norm": "University of Amsterdam;IT University of Copenhagen;MCML Munich;Ludwig Maximilian University of Munich", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.uva.nl;https://itu.dk;;https://www.lmu.de", + "aff_unique_abbr": "UvA;ITU;;LMU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0;0;1+2+2;0", + "aff_country_unique": "Netherlands;Denmark;Germany" + }, + { + "id": "2022.emnlp-main.114", + "title": "StoryER: Automatic Story Evaluation via Ranking, Rating and Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing automatic story evaluation methods place a premium on story lexical level coherence, deviating from human preference.We go beyond this limitation by considering a novel Story Evaluation method that mimics human preference when judging a story, namely StoryER, which consists of three sub-tasks: Ranking, Rating and Reasoning.Given either a machine-generated or a human-written story, StoryER requires the machine to output 1) a preference score that corresponds to human preference, 2) specific ratings and their corresponding confidences and 3) comments for various aspects (e.g., opening, character-shaping).To support these tasks, we introduce a well-annotated dataset comprising (i) 100k ranked story pairs; and (ii) a set of 46k ratings and comments on various aspects of the story.We finetune Longformer-Encoder-Decoder (LED) on the collected dataset, with 
the encoder responsible for preference score and aspect prediction and the decoder for comment generation.Our comprehensive experiments result a competitive benchmark for each task, showing the high correlation to human preference.In addition, we have witnessed the joint learning of the preference scores, the aspect ratings, and the comments brings gain each single task.Our dataset and benchmarks are publicly available to advance the research of story evaluation tasks.", + "author": "Hong Chen; Duc Vo; Hiroya Takamura; Yusuke Miyao; Hideki Nakayama", + "authorids": "/h/hong-chen/; /d/duc-vo/; /h/hiroya-takamura/; /y/yusuke-miyao/; /h/hideki-nakayama/", + "bibtex": "@inproceedings{chen-etal-2022-storyer,\n title = \"{S}tory{ER}: Automatic Story Evaluation via Ranking, Rating and Reasoning\",\n author = \"Chen, Hong and\n Vo, Duc and\n Takamura, Hiroya and\n Miyao, Yusuke and\n Nakayama, Hideki\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.114/\",\n doi = \"10.18653/v1/2022.emnlp-main.114\",\n pages = \"1739--1753\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.114.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.114/", + "pdf_size": 1615301, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2635668422335208369&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "The University of Tokyo + National Institute of Advanced Industrial Science and Technology, Japan; The University of Tokyo + National Institute of Advanced Industrial Science and Technology, Japan; Tokyo Institute of Technology + National Institute of Advanced Industrial Science and Technology, Japan; The 
University of Tokyo + National Institute of Advanced Industrial Science and Technology, Japan; The University of Tokyo + National Institute of Advanced Industrial Science and Technology, Japan", + "aff_domain": "nlab.ci.i.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp;aist.go.jp;is.s.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp", + "email": "nlab.ci.i.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp;aist.go.jp;is.s.u-tokyo.ac.jp;nlab.ci.i.u-tokyo.ac.jp", + "github": "https://github.com/sairin1202/StoryER", + "project": "http://storytelling-lab.com/eval", + "author_num": 5, + "aff_unique_index": "0+1;0+1;2+1;0+1;0+1", + "aff_unique_norm": "The University of Tokyo;National Institute of Advanced Industrial Science and Technology;Tokyo Institute of Technology", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.u-tokyo.ac.jp;https://www.aist.go.jp;https://www.titech.ac.jp", + "aff_unique_abbr": "UTokyo;AIST;Titech", + "aff_campus_unique_index": ";;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "Japan" + }, + { + "id": "2022.findings-emnlp.28", + "title": "Stretching Sentence-pair NLI Models to Reason over Long Documents and Clusters", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Natural Language Inference (NLI) has been extensively studied by the NLP community as a framework for estimating the semantic relation between sentence pairs. While early work identified certain biases in NLI models, recent advancements in modeling and datasets demonstrated promising performance.In this work, we further explore the direct zero-shot applicability of NLI models to real applications, beyond the sentence-pair setting they were trained on. First, we analyze the robustness of these models to longer and out-of-domain inputs. Then, we develop new aggregation methods to allow operating over full documents, reaching state-of-the-art performance on the ContractNLI dataset. 
Interestingly, we find NLI scores to provide strong retrieval signals, leading to more relevant evidence extractions compared to common similarity-based methods. Finally, we go further and investigate whole document clusters to identify both discrepancies and consensus among sources. In a test case, we find real inconsistencies between Wikipedia pages in different languages about the same topic.", + "author": "Tal Schuster; Sihao Chen; Senaka Buthpitiya; Alex Fabrikant; Donald Metzler", + "authorids": "/t/tal-schuster/; /s/sihao-chen/; /s/senaka-buthpitiya/; /a/alex-fabrikant/; /d/donald-metzler/", + "bibtex": "@inproceedings{schuster-etal-2022-stretching,\n title = \"Stretching Sentence-pair {NLI} Models to Reason over Long Documents and Clusters\",\n author = \"Schuster, Tal and\n Chen, Sihao and\n Buthpitiya, Senaka and\n Fabrikant, Alex and\n Metzler, Donald\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.28/\",\n doi = \"10.18653/v1/2022.findings-emnlp.28\",\n pages = \"394--412\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.28.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.28/", + "pdf_size": 887183, + "gs_citation": 45, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7326266836230604853&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "Google Research; Google Research + University of Pennsylvania; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "https://github.com/google-research-datasets/wiki-translated-clusters-nli", + 
"project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;0", + "aff_unique_norm": "Google;University of Pennsylvania", + "aff_unique_dep": "Google Research;", + "aff_unique_url": "https://research.google;https://www.upenn.edu", + "aff_unique_abbr": "Google Research;UPenn", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View;", + "aff_country_unique_index": "0;0+0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.739", + "title": "Structural Constraints and Natural Language Inference for End-to-End Flowchart Grounded Dialog Response Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Flowchart grounded dialog systems converse with users by following a given flowchart and a corpus of FAQs. The existing state-of-the-art approach (Raghu et al, 2021) for learning such a dialog system, named FLONET, has two main limitations. (1) It uses a Retrieval Augmented Generation (RAG) framework which represents a flowchart as a bag of nodes. By doing so, it loses the connectivity structure between nodes that can aid in better response generation. (2) Typically dialogs progress with the agent asking polar (Y/N) questions, but users often respond indirectly without the explicit use of polar words. In such cases, it fails to understand the correct polarity of the answer. To overcome these issues, we propose Structure-Aware FLONET (SA-FLONET) which infuses structural constraints derived from the connectivity structure of flowcharts into the RAG framework. It uses natural language inference to better predict the polarity of indirect Y/N answers. 
We find that SA-FLONET outperforms FLONET, with a success rate improvement of 68% and 123% in flowchart grounded response generation and zero-shot flowchart grounded response generation tasks respectively.", + "author": "Dinesh Raghu; Suraj Joshi; Sachindra Joshi; Mausam -", + "authorids": "/d/dinesh-raghu/; /s/suraj-joshi/; /s/sachindra-joshi/; /m/mausam/", + "bibtex": "@inproceedings{raghu-etal-2022-structural,\n title = \"Structural Constraints and Natural Language Inference for End-to-End Flowchart Grounded Dialog Response Generation\",\n author = \"Raghu, Dinesh and\n Joshi, Suraj and\n Joshi, Sachindra and\n -, Mausam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.739/\",\n doi = \"10.18653/v1/2022.emnlp-main.739\",\n pages = \"10763--10774\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.739.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.739/", + "pdf_size": 550900, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14492520267664387270&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Indian Institute of Technology, New Delhi, India+IBM Research, New Delhi, India; Indian Institute of Technology, New Delhi, India; IBM Research, New Delhi, India; Indian Institute of Technology, New Delhi, India", + "aff_domain": "in.ibm.com;iitd.ac.in;in.ibm.com;cse.iitd.ac.in", + "email": "in.ibm.com;iitd.ac.in;in.ibm.com;cse.iitd.ac.in", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;0", + "aff_unique_norm": "Indian Institute of Technology;IBM Research", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.iitdelhi.ac.in;https://www.ibm.com/research", + "aff_unique_abbr": "IIT Delhi;IBM", + "aff_campus_unique_index": "0+0;0;0;0", + "aff_campus_unique": "New Delhi", + "aff_country_unique_index": "0+0;0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.findings-emnlp.362", + "title": "Structural Contrastive Representation Learning for Zero-shot Multi-label Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Zero-shot multi-label text classification (ZMTC) is a fundamental task in natural language processing with applications in the cold start problem of recommendation systems. Ideally, one would learn an expressive representation of both input text and label features so that ZMTC is transformed into a nearest neighbor search problem. However, the existing representation learning approaches for ZMTC struggle with accuracy as well as poor training efficiency. Firstly, the input text is structural, consisting of both short title sentences and long content paragraphs. It is challenging to model the correlation between short label descriptions and long structural input documents. Secondly, the enormous label space in ZMTC forces the existing approaches to perform multi-stage learning with label engineering. As a result, the training overhead is significant. In this paper, we address both problems by introducing an end-to-end structural contrastive representation learning approach. We propose a randomized text segmentation (RTS) technique to generate high-quality contrastive pairs. This RTS technique allows us to model title-content correlation. Additionally, we simplify the multi-stage ZMTC learning strategy by avoiding label engineering. Extensive experiments demonstrate that our approach leads to up to 2.33% improvement in precision@1 and 5.94x speedup in training time on publicly available datasets. 
Our code is available publicly.", + "author": "Tianyi Zhang; Zhaozhuo Xu; Tharun Medini; Anshumali Shrivastava", + "authorids": "/t/tianyi-zhang/; /z/zhaozhuo-xu/; /t/tharun-medini/; /a/anshumali-shrivastava/", + "bibtex": "@inproceedings{zhang-etal-2022-structural-contrastive,\n title = \"Structural Contrastive Representation Learning for Zero-shot Multi-label Text Classification\",\n author = \"Zhang, Tianyi and\n Xu, Zhaozhuo and\n Medini, Tharun and\n Shrivastava, Anshumali\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.362/\",\n doi = \"10.18653/v1/2022.findings-emnlp.362\",\n pages = \"4937--4947\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.362.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.362/", + "pdf_size": 1133931, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13323943659646387093&as_sdt=800005&sciodt=0,15&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Rice University; Department of Computer Science, Rice University; ThirdAI Corp; Department of Computer Science, Rice University + ThirdAI Corp", + "aff_domain": "rice.edu;rice.edu;thirdai.com;rice.edu", + "email": "rice.edu;rice.edu;thirdai.com;rice.edu", + "github": "https://github.com/tonyzhang617/structural-contrastive-representation-learning", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0+1", + "aff_unique_norm": "Rice University;ThirdAI Corporation", + "aff_unique_dep": "Department of Computer Science;", + "aff_unique_url": "https://www.rice.edu;", + "aff_unique_abbr": "Rice;ThirdAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": 
"0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.337", + "title": "Structural generalization is hard for sequence-to-sequence models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Sequence-to-sequence (seq2seq) models have been successful across many NLP tasks,including ones that require predicting linguistic structure. However, recent work on compositional generalization has shown that seq2seq models achieve very low accuracy in generalizing to linguistic structures that were not seen in training. We present new evidence that this is a general limitation of seq2seq models that is present not just in semantic parsing, but also in syntactic parsing and in text-to-text tasks, and that this limitation can often be overcome by neurosymbolic models that have linguistic knowledge built in. We further report on some experiments that give initial answers on the reasons for these limitations.", + "author": "Yuekun Yao; Alexander Koller", + "authorids": "/y/yuekun-yao/; /a/alexander-koller/", + "bibtex": "@inproceedings{yao-koller-2022-structural,\n title = \"Structural generalization is hard for sequence-to-sequence models\",\n author = \"Yao, Yuekun and\n Koller, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.337/\",\n doi = \"10.18653/v1/2022.emnlp-main.337\",\n pages = \"5048--5062\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.337.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.337/", + "pdf_size": 402357, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15578520759719689755&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 3, + "aff": "Department of Language Science and Technology, Saarland Informatics Campus, Saarland University, Saarbr\u00fccken, Germany; Department of Language Science and Technology, Saarland Informatics Campus, Saarland University, Saarbr\u00fccken, Germany", + "aff_domain": "coli.uni-saarland.de;coli.uni-saarland.de", + "email": "coli.uni-saarland.de;coli.uni-saarland.de", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Saarland University", + "aff_unique_dep": "Department of Language Science and Technology", + "aff_unique_url": "https://www.uni-saarland.de", + "aff_unique_abbr": "Uni Saar", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Saarbr\u00fccken", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.365", + "title": "Structurally Diverse Sampling for Sample-Efficient Training and Comprehensive Evaluation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A growing body of research has demonstrated the inability of NLP models to generalize compositionally and has tried to alleviate it through specialized architectures, training schemes, and data augmentation, among other approaches. In this work, we study a different approach: training on instances with diverse structures. We propose a model-agnostic algorithm for subsampling such sets of instances from a labeled instance pool with structured outputs. Evaluating on both compositional template splits and traditional IID splits of 5 semantic parsing datasets of varying complexity, we show that structurally diverse training using our algorithm leads to comparable or better generalization than prior algorithms in 9 out of 10 dataset-split type pairs. In general, we find structural diversity to consistently improve sample efficiency compared to random train sets. 
Moreover, we show that structurally diverse sampling yields comprehensive test sets that are a lot more challenging than IID test sets. Finally, we provide two explanations for improved generalization from diverse train sets: 1) improved coverage of output substructures, and 2) a reduction in spurious correlations between these substructures.", + "author": "Shivanshu Gupta; Sameer Singh; Matt Gardner", + "authorids": "/s/shivanshu-gupta/; /s/sameer-singh/; /m/matt-gardner/", + "bibtex": "@inproceedings{gupta-etal-2022-structurally,\n title = \"Structurally Diverse Sampling for Sample-Efficient Training and Comprehensive Evaluation\",\n author = \"Gupta, Shivanshu and\n Singh, Sameer and\n Gardner, Matt\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.365/\",\n doi = \"10.18653/v1/2022.findings-emnlp.365\",\n pages = \"4966--4979\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.365.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.365/", + "pdf_size": 1306889, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7351103872168436707&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "University of California Irvine; University of California Irvine + Allen Institute for AI; Microsoft Semantic Machines", + "aff_domain": "uci.edu;uci.edu;microsoft.com", + "email": "uci.edu;uci.edu;microsoft.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0+1;2", + "aff_unique_norm": "University of California, Irvine;Allen Institute for AI;Microsoft", + "aff_unique_dep": ";;Semantic Machines", + "aff_unique_url": 
"https://www.uci.edu;https://allenai.org;https://www.microsoft.com", + "aff_unique_abbr": "UCI;AI2;Microsoft", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Irvine;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.556", + "title": "Structure-Unified M-Tree Coding Solver for Math Word Problem", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As one of the challenging NLP tasks, designing math word problem (MWP) solvers has attracted increasing research attention for the past few years. In previous work, models designed by taking into account the properties of the binary tree structure of mathematical expressions at the output side have achieved better performance. However, the expressions corresponding to a MWP are often diverse (e.g., n1+n2 \u00d7 n3-n4, n3\u00d7 n2-n4+n1, etc.), and so are the corresponding binary trees, which creates difficulties in model learning due to the non-deterministic output space. In this paper, we propose the Structure-Unified M-Tree Coding Solver (SUMC-Solver), which applies a tree with any M branches (M-tree) to unify the output structures. To learn the M-tree, we use a mapping to convert the M-tree into the M-tree codes, where codes store the information of the paths from tree root to leaf nodes and the information of leaf nodes themselves, and then devise a Sequence-to-Code (seq2code) model to generate the codes. 
Experimental results on the widely used MAWPS and Math23K datasets have demonstrated that SUMC-Solver not only outperforms several state-of-the-art models under similar experimental settings but also performs much better under low-resource conditions.", + "author": "Bin Wang; Jiangzhou Ju; Yang Fan; Xinyu Dai; Shujian Huang; Jiajun Chen", + "authorids": "/b/bin-wang/; /j/jiangzhou-ju/; /y/yang-fan/; /x/xinyu-dai/; /s/shujian-huang/; /j/jiajun-chen/", + "bibtex": "@inproceedings{wang-etal-2022-structure,\n title = \"Structure-Unified {M}-Tree Coding Solver for Math Word Problem\",\n author = \"Wang, Bin and\n Ju, Jiangzhou and\n Fan, Yang and\n Dai, Xinyu and\n Huang, Shujian and\n Chen, Jiajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.556/\",\n doi = \"10.18653/v1/2022.emnlp-main.556\",\n pages = \"8122--8132\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.556.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.556/", + "pdf_size": 875431, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15963934252194310418&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "National Key Laboratory for Novel Software Technology, Nanjing University; Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing; National Key Laboratory for Novel Software Technology, Nanjing University; Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing; National Key Laboratory for Novel Software Technology, Nanjing University; Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing", + "aff_domain": 
"smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn", + "email": "smail.nju.edu.cn;smail.nju.edu.cn;smail.nju.edu.cn;nju.edu.cn;nju.edu.cn;nju.edu.cn", + "github": "https://github.com/devWangBin/SUMC-Solver", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;1;0;1", + "aff_unique_norm": "Nanjing University;Collaborative Innovation Center of Novel Software Technology and Industrialization", + "aff_unique_dep": "National Key Laboratory for Novel Software Technology;", + "aff_unique_url": "http://www.nju.edu.cn;", + "aff_unique_abbr": "Nanjing University;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.219", + "title": "StuBot: Learning by Teaching a Conversational Agent Through Machine Reading Comprehension", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper proposes StuBot, a text-based conversational agent that provides adaptive feedback for learning by teaching. StuBot first asks the users to teach the learning content by summarizing and explaining it in their own words. After the users inputted the explanation text for teaching, StuBot uses a machine reading comprehension (MRC) engine to provide adaptive feedback with further questions about the insufficient parts of the explanation text. We conducted a within-subject study to evaluate the effectiveness of adaptive feedback by StuBot. 
Both the quantitative and qualitative results showed that learning by teaching with adaptive feedback can improve learning performance, immersion, and overall experience.", + "author": "Nayoung Jin; Hana Lee", + "authorids": "/n/nayoung-jin/; /h/hana-lee/", + "bibtex": "@inproceedings{jin-lee-2022-stubot,\n title = \"{S}tu{B}ot: Learning by Teaching a Conversational Agent Through Machine Reading Comprehension\",\n author = \"Jin, Nayoung and\n Lee, Hana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.219/\",\n doi = \"10.18653/v1/2022.findings-emnlp.219\",\n pages = \"3008--3020\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.219.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.219/", + "pdf_size": 5895519, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12141384888117176197&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-main.120", + "title": "Style Transfer as Data Augmentation: A Case Study on Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this work, we take the named entity recognition task in the English language as a case study and explore style transfer as a data augmentation method to increase the size and diversity of training data in low-resource scenarios. We propose a new method to effectively transform the text from a high-resource domain to a low-resource domain by changing its style-related attributes to generate synthetic data for training. 
Moreover, we design a constrained decoding algorithm along with a set of key ingredients for data selection to guarantee the generation of valid and coherent data. Experiments and analysis on five different domain pairs under different data regimes demonstrate that our approach can significantly improve results compared to current state-of-the-art data augmentation methods. Our approach is a practical solution to data scarcity, and we expect it to be applicable to other NLP tasks.", + "author": "Shuguang Chen; Leonardo Neves; Thamar Solorio", + "authorids": "/s/shuguang-chen/; /l/leonardo-neves/; /t/thamar-solorio/", + "bibtex": "@inproceedings{chen-etal-2022-style,\n title = \"Style Transfer as Data Augmentation: A Case Study on Named Entity Recognition\",\n author = \"Chen, Shuguang and\n Neves, Leonardo and\n Solorio, Thamar\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.120/\",\n doi = \"10.18653/v1/2022.emnlp-main.120\",\n pages = \"1827--1841\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.120.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.120/", + "pdf_size": 1358906, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13548916340213195519&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Houston; Snap Inc.; University of Houston", + "aff_domain": "uh.edu;snap.com;uh.edu", + "email": "uh.edu;snap.com;uh.edu", + "github": "https://github.com/RiTUAL-UH/DA_NER", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "University of Houston;Snap Inc.", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.uh.edu;https://www.snapinc.com", + "aff_unique_abbr": "UH;Snap", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.103", + "title": "SubeventWriter: Iterative Sub-event Sequence Generation with Coherence Controller", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we propose a new task of sub-event generation for an unseen process to evaluate the understanding of the coherence of sub-event actions and objects. To solve the problem, we design SubeventWriter, a sub-event sequence generation framework with a coherence controller. Given an unseen process, the framework can iteratively construct the sub-event sequence by generating one sub-event at each iteration. We also design a very effective coherence controller to decode more coherent sub-events. As our extensive experiments and analysis indicate, SubeventWriter can generate more reliable and meaningful sub-event sequences for unseen processes.", + "author": "Zhaowei Wang; Hongming Zhang; Tianqing Fang; Yangqiu Song; Ginny Wong; Simon See", + "authorids": "/z/zhaowei-wang/; /h/hongming-zhang/; /t/tianqing-fang/; /y/yangqiu-song/; /g/ginny-wong/; /s/simon-see/", + "bibtex": "@inproceedings{wang-etal-2022-subeventwriter,\n title = \"{S}ubevent{W}riter: Iterative Sub-event Sequence Generation with Coherence Controller\",\n author = \"Wang, Zhaowei and\n Zhang, Hongming and\n Fang, Tianqing and\n Song, Yangqiu and\n Wong, Ginny and\n See, Simon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.103/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.103\",\n pages = \"1590--1604\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.103.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.103/", + "pdf_size": 735866, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11634670475284958198&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science and Engineering, HKUST; Tencent AI Lab, Bellevue, USA; Department of Computer Science and Engineering, HKUST; Department of Computer Science and Engineering, HKUST; NVIDIA AI Technology Center (NV AITC), NVIDIA, Santa Clara, USA; NVIDIA AI Technology Center (NV AITC), NVIDIA, Santa Clara, USA", + "aff_domain": "cse.ust.hk;global.tencent.com;cse.ust.hk;cse.ust.hk;nvidia.com;nvidia.com", + "email": "cse.ust.hk;global.tencent.com;cse.ust.hk;cse.ust.hk;nvidia.com;nvidia.com", + "github": "https://github.com/HKUST-KnowComp/SubeventWriter", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;2;2", + "aff_unique_norm": "Hong Kong University of Science and Technology;Tencent;NVIDIA", + "aff_unique_dep": "Department of Computer Science and Engineering;AI Lab;NVIDIA AI Technology Center", + "aff_unique_url": "https://www.hkust.edu.hk;https://ai.tencent.com;https://www.nvidia.com", + "aff_unique_abbr": "HKUST;Tencent AI Lab;NV", + "aff_campus_unique_index": "1;2;2", + "aff_campus_unique": ";Bellevue;Santa Clara", + "aff_country_unique_index": "0;1;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.503", + "title": "Subword Evenness (SuE) as a Predictor of Cross-lingual Transfer to Low-resource Languages", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained multilingual models, such as mBERT, XLM-R and mT5, are used to improve the performance on various tasks in low-resource languages via cross-lingual transfer. 
In this framework, English is usually seen as the most natural choice for a transfer language (for fine-tuning or continued training of a multilingual pre-trained model), but it has been revealed recently that this is often not the best choice. The success of cross-lingual transfer seems to depend on some properties of languages, which are currently hard to explain. Successful transfer often happens between unrelated languages and it often cannot be explained by data-dependent factors.In this study, we show that languages written in non-Latin and non-alphabetic scripts (mostly Asian languages) are the best choices for improving performance on the task of Masked Language Modelling (MLM) in a diverse set of 30 low-resource languages and that the success of the transfer is well predicted by our novel measure of Subword Evenness (SuE). Transferring language models over the languages that score low on our measure results in the lowest average perplexity over target low-resource languages. Our correlation coefficients obtained with three different pre-trained multilingual models are consistently higher than all the other predictors, including text-based measures (type-token ratio, entropy) and linguistically motivated choice (genealogical and typological proximity).", + "author": "Olga Pelloni; Anastassia Shaitarova; Tanja Samardzic", + "authorids": "/o/olga-pelloni/; /a/anastassia-shaitarova/; /t/tanja-samardzic/", + "bibtex": "@inproceedings{pelloni-etal-2022-subword,\n title = \"Subword Evenness ({S}u{E}) as a Predictor of Cross-lingual Transfer to Low-resource Languages\",\n author = \"Pelloni, Olga and\n Shaitarova, Anastassia and\n Samardzic, Tanja\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational 
Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.503/\",\n doi = \"10.18653/v1/2022.emnlp-main.503\",\n pages = \"7428--7445\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.503.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.503/", + "pdf_size": 1350456, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=871791711407618625&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Text Group, URPP Language and Space; Department of Computational Linguistics, University of Zurich, Switzerland; Text Group, URPP Language and Space + Department of Computational Linguistics, University of Zurich, Switzerland", + "aff_domain": "gmail.com;uzh.ch;uzh.ch", + "email": "gmail.com;uzh.ch;uzh.ch", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0+1", + "aff_unique_norm": "University Research Priority Program Language and Space;University of Zurich", + "aff_unique_dep": "Text Group;Department of Computational Linguistics", + "aff_unique_url": ";https://www.unizh.ch", + "aff_unique_abbr": ";UZH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0+0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.findings-emnlp.494", + "title": "Subword Segmental Language Modelling for Nguni Languages", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Subwords have become the standard units of text in NLP, enabling efficient open-vocabulary models. With algorithms like byte-pair encoding (BPE), subword segmentation is viewed as a preprocessing step applied to the corpus before training. This can lead to sub-optimal segmentations for low-resource languages with complex morphologies. We propose a subword segmental language model (SSLM) that learns how to segment words while being trained for autoregressive language modelling. 
By unifying subword segmentation and language modelling, our model learns subwords that optimise LM performance. We train our model on the 4 Nguni languages of South Africa. These are low-resource agglutinative languages, so subword information is critical. As an LM, SSLM outperforms existing approaches such as BPE-based models on average across the 4 languages. Furthermore, it outperforms standard subword segmenters on unsupervised morphological segmentation. We also train our model as a word-level sequence model, resulting in an unsupervised morphological segmenter that outperforms existing methods by a large margin for all 4 languages. Our results show that learning subword segmentation is an effective alternative to existing subword segmenters, enabling the model to discover morpheme-like subwords that improve its LM capabilities.", + "author": "Francois Meyer; Jan Buys", + "authorids": "/f/francois-meyer/; /j/jan-buys/", + "bibtex": "@inproceedings{meyer-buys-2022-subword,\n title = \"Subword Segmental Language Modelling for Nguni Languages\",\n author = \"Meyer, Francois and\n Buys, Jan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.494/\",\n doi = \"10.18653/v1/2022.findings-emnlp.494\",\n pages = \"6636--6649\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.494.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.494/", + "pdf_size": 354369, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17094724256562059480&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, University of Cape Town; Department of Computer Science, University of Cape Town", + 
"aff_domain": "myuct.ac.za;cs.uct.ac.za", + "email": "myuct.ac.za;cs.uct.ac.za", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Cape Town", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.uct.ac.za", + "aff_unique_abbr": "UCT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "South Africa" + }, + { + "id": "2022.findings-emnlp.69", + "title": "Subword-Delimited Downsampling for Better Character-Level Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Subword-level models have been the dominant paradigm in NLP. However, character-level models have the benefit of seeing each character individually, providing the model with more detailed information that ultimately could lead to better models. Recent works have shown character-level models to be competitive with subword models, but costly in terms of time and computation. Character-level models with a downsampling component alleviate this, but at the cost of quality, particularly for machine translation. 
This work analyzes the problems of previous downsampling methods and introduces a novel downsampling method which is informed by subwords.This new downsampling method not only outperforms existing downsampling methods, showing that downsampling characters can be done without sacrificing quality, but also leads to promising performance compared to subword models for translation.", + "author": "Lukas Edman; Antonio Toral; Gertjan van Noord", + "authorids": "/l/lukas-edman/; /a/antonio-toral/; /g/gertjan-van-noord/", + "bibtex": "@inproceedings{edman-etal-2022-subword,\n title = \"Subword-Delimited Downsampling for Better Character-Level Translation\",\n author = \"Edman, Lukas and\n Toral, Antonio and\n van Noord, Gertjan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.69/\",\n doi = \"10.18653/v1/2022.findings-emnlp.69\",\n pages = \"981--992\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.69.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.69/", + "pdf_size": 366402, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1186479741852680006&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Center for Language and Cognition; Center for Language and Cognition; Center for Language and Cognition", + "aff_domain": "rug.nl;rug.nl;rug.nl", + "email": "rug.nl;rug.nl;rug.nl", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Center for Language and Cognition", + "aff_unique_dep": "Department of Language and Cognition", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + 
"aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.81", + "title": "Successive Prompting for Decomposing Complex Questions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Answering complex questions that require making latent decisions is a challenging task, especially when limited supervision is available. Recent works leverage the capabilities of large language models (LMs) to perform complex question answering in a few-shot setting by demonstrating how to output intermediate rationalizations while solving the complex question in a single pass. We introduce \u201cSuccessive Prompting\u201d where, we iteratively break down a complex task into a simple task, solve it, and then repeat the process until we get the final solution. Successive prompting decouples the supervision for decomposing complex questions from the supervision for answering simple questions, allowing us to (1) have multiple opportunities to query in-context examples at each reasoning step (2) learn question decomposition separately from question answering, including using synthetic data, and (3) use bespoke (fine-tuned) components for reasoning steps where a large LM does not perform well. The intermediate supervision is typically manually written, which can be expensive to collect. We introduce a way to generate synthetic dataset which can be used to bootstrap model\u2019s ability to decompose and answer intermediate questions. 
Our best model (with successive prompting) achieves an improvement in F1 of ~5% when compared with a state-of-the-art model with synthetic augmentations and few-shot version of the DROP dataset.", + "author": "Dheeru Dua; Shivanshu Gupta; Sameer Singh; Matt Gardner", + "authorids": "/d/dheeru-dua/; /s/shivanshu-gupta/; /s/sameer-singh/; /m/matt-gardner/", + "bibtex": "@inproceedings{dua-etal-2022-successive,\n title = \"Successive Prompting for Decomposing Complex Questions\",\n author = \"Dua, Dheeru and\n Gupta, Shivanshu and\n Singh, Sameer and\n Gardner, Matt\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.81/\",\n doi = \"10.18653/v1/2022.emnlp-main.81\",\n pages = \"1251--1265\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.81.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.81/", + "pdf_size": 280982, + "gs_citation": 111, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18044102687778404844&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 4, + "aff": "University of California, Irvine, USA+Allen Institute for Artificial Intelligence; University of California, Irvine, USA+Allen Institute for Artificial Intelligence; University of California, Irvine, USA+Allen Institute for Artificial Intelligence; Microsoft Semantic Machines", + "aff_domain": "uci.edu;uci.edu;uci.edu;microsoft.com", + "email": "uci.edu;uci.edu;uci.edu;microsoft.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;2", + "aff_unique_norm": "University of California, Irvine;Allen Institute for Artificial Intelligence;Microsoft", + "aff_unique_dep": ";;Semantic Machines", + "aff_unique_url": 
"https://www.uci.edu;https://allenai.org;https://www.microsoft.com", + "aff_unique_abbr": "UCI;AI2;Microsoft", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Irvine;", + "aff_country_unique_index": "0+0;0+0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.490", + "title": "Summarization as Indirect Supervision for Relation Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Relation extraction (RE) models have been challenged by their reliance on training data with expensive annotations. Considering that summarization tasks aim at acquiring concise expressions of synoptical information from the longer context, these tasks naturally align with the objective of RE, i.e., extracting a kind of synoptical information that describes the relation of entity mentions. We present SuRE, which converts RE into a summarization formulation. SuRE leads to more precise and resource-efficient RE based on indirect supervision from summarization tasks. To achieve this goal, we develop sentence and relation conversion techniques that essentially bridge the formulation of summarization and RE tasks. We also incorporate constraint decoding techniques with Trie scoring to further enhance summarization-based RE with robust inference. 
Experiments on three RE datasets demonstrate the effectiveness of SuRE in both full-dataset and low-resource settings, showing that summarization is a promising source of indirect supervision signals to improve RE models.", + "author": "Keming Lu; I-Hung Hsu; Wenxuan Zhou; Mingyu Derek Ma; Muhao Chen", + "authorids": "/k/keming-lu/; /i/i-hung-hsu/; /w/wenxuan-zhou/; /m/mingyu-derek-ma/; /m/muhao-chen/", + "bibtex": "@inproceedings{lu-etal-2022-summarization,\n title = \"Summarization as Indirect Supervision for Relation Extraction\",\n author = \"Lu, Keming and\n Hsu, I-Hung and\n Zhou, Wenxuan and\n Ma, Mingyu Derek and\n Chen, Muhao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.490/\",\n doi = \"10.18653/v1/2022.findings-emnlp.490\",\n pages = \"6575--6594\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.490.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.490/", + "pdf_size": 365976, + "gs_citation": 52, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9486884416911394264&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "University of Southern California; University of Southern California; University of Southern California; University of California, Los Angeles; University of Southern California", + "aff_domain": "usc.edu;usc.edu;usc.edu;cs.ucla.edu;usc.edu", + "email": "usc.edu;usc.edu;usc.edu;cs.ucla.edu;usc.edu", + "github": "https://github.com/luka-group/SuRE", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "University of Southern California;University of California, Los Angeles", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.usc.edu;https://www.ucla.edu", + "aff_unique_abbr": "USC;UCLA", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Los Angeles", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.250", + "title": "Summarizing Community-based Question-Answer Pairs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Community-based Question Answering (CQA), which allows users to acquire their desired information, has increasingly become an essential component of online services in various domains such as E-commerce, travel, and dining. However, an overwhelming number of CQA pairs makes it difficult for users without particular intent to find useful information spread over CQA pairs. To help users quickly digest the key information, we propose the novel CQA summarization task that aims to create a concise summary from CQA pairs. To this end, we first design a multi-stage data annotation process and create a benchmark dataset, COQASUM, based on the Amazon QA corpus. We then compare a collection of extractive and abstractive summarization methods and establish a strong baseline approach DedupLED for the CQA summarization task. Our experiment further confirms two key challenges, sentence-type transfer and deduplication removal, towards the CQA summarization task. 
Our data and code are publicly available.", + "author": "Ting-Yao Hsu; Yoshi Suhara; Xiaolan Wang", + "authorids": "/t/ting-yao-hsu/; /y/yoshi-suhara/; /x/xiaolan-wang/", + "bibtex": "@inproceedings{hsu-etal-2022-summarizing,\n title = \"Summarizing Community-based Question-Answer Pairs\",\n author = \"Hsu, Ting-Yao and\n Suhara, Yoshi and\n Wang, Xiaolan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.250/\",\n doi = \"10.18653/v1/2022.emnlp-main.250\",\n pages = \"3798--3808\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.250.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.250/", + "pdf_size": 879919, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14223158860079363407&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Pennsylvania State University + Megagon Labs; Grammarly + Megagon Labs; Meta AI + Megagon Labs", + "aff_domain": "psu.edu;grammarly.com;meta.com", + "email": "psu.edu;grammarly.com;meta.com", + "github": "https://github.com/megagonlabs/qa-summarization", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2+1;3+1", + "aff_unique_norm": "Pennsylvania State University;Megagon Labs;Grammarly;Meta Platforms, Inc.", + "aff_unique_dep": ";;;Meta AI", + "aff_unique_url": "https://www.psu.edu;https://www.megagonlabs.com;https://www.grammarly.com;https://meta.com", + "aff_unique_abbr": "PSU;;Grammarly;Meta", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.162", + "title": "Summarizing Procedural Text: Data and 
Approach", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Procedural text is a widely used genre that contains many steps of instructions of how to cook a dish or how to conduct a chemical experiment and analyze the procedural text has become a popular task in the NLP field. Since the procedural text can be very long and contains many details, summarizing the whole procedural text or giving an overview for each complicated procedure step can save time for readers and help them to capture the core information in the text. In this paper, we propose the procedural text summarization task with two summarization granularity: step-view and global-view, which summarizes each step in the procedural text separately or gives an overall summary for all steps respectively. To tackle this task, we propose an Entity-State Graph-based Summarizer (ESGS) which is based on state-of-the-art entity state tracking methods and constructs a heterogeneous graph to aggregate contextual information for each procedure. In order to help the summarization model focus on the salient entities, we propose to use the contextualized procedure graph representation to predict the salient entities. Experiments conducted on two datasets verify the effectiveness of our proposed model. 
Our code and datasets will be released on https://github.com/gsh199449/procedural-summ.", + "author": "Shen Gao; Haotong Zhang; Xiuying Chen; Rui Yan; Dongyan Zhao", + "authorids": "/s/shen-gao/; /h/haotong-zhang/; /x/xiuying-chen/; /r/rui-yan/; /d/dongyan-zhao/", + "bibtex": "@inproceedings{gao-etal-2022-summarizing-procedural,\n title = \"Summarizing Procedural Text: Data and Approach\",\n author = \"Gao, Shen and\n Zhang, Haotong and\n Chen, Xiuying and\n Yan, Rui and\n Zhao, Dongyan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.162/\",\n doi = \"10.18653/v1/2022.findings-emnlp.162\",\n pages = \"2216--2225\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.162.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.162/", + "pdf_size": 426918, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3933771001507985252&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Technology, Shandong University; Department of Mathematics, National University of Singapore; King Abdullah University of Science and Technology; Wangxuan Institute of Computer Technology, Peking University; Gaoling School of Arti\ufb01cial Intelligence, Renmin Univ. 
of China", + "aff_domain": "sdu.edu.cn;u.nus.edu;kaust.edu.sa;pku.edu.cn;ruc.edu.cn", + "email": "sdu.edu.cn;u.nus.edu;kaust.edu.sa;pku.edu.cn;ruc.edu.cn", + "github": "https://github.com/gsh199449/procedural-summ", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;4", + "aff_unique_norm": "Shandong University;National University of Singapore;King Abdullah University of Science and Technology;Peking University;Renmin University of China", + "aff_unique_dep": "School of Computer Science and Technology;Department of Mathematics;;Wangxuan Institute of Computer Technology;School of Arti\ufb01cial Intelligence", + "aff_unique_url": "http://www.sdu.edu.cn;https://www.nus.edu.sg;https://www.kast.kau.edu.sa;http://www.pku.edu.cn;http://www.ruc.edu.cn", + "aff_unique_abbr": ";NUS;KAUST;PKU;RUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Gaoling", + "aff_country_unique_index": "0;1;2;0;0", + "aff_country_unique": "China;Singapore;Saudi Arabia" + }, + { + "id": "2022.emnlp-main.340", + "title": "Super-NaturalInstructions: Generalization via Declarative Instructions on 1600+ NLP Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "How well can NLP models generalize to a variety of unseen tasks when provided with task instructions? To address this question, we first introduce Super-NaturalInstructions, a benchmark of 1,616 diverse NLP tasks and their expert-written instructions. Our collection covers 76 distinct task types, including but not limited to classification, extraction, infilling, sequence tagging, text rewriting, and text composition. 
This large and diverse collection of tasks enables rigorous benchmarking of cross-task generalization under instructions\u2014training models to follow instructions on a subset of tasks and evaluating them on the remaining unseen ones.Furthermore, we build Tk-Instruct, a transformer model trained to follow a variety of in-context instructions (plain language task definitions or k-shot examples). Our experiments show that Tk-Instruct outperforms existing instruction-following models such as InstructGPT by over 9% on our benchmark despite being an order of magnitude smaller. We further analyze generalization as a function of various scaling parameters, such as the number of observed tasks, the number of instances per task, and model sizes. We hope our dataset and model facilitate future progress towards more general-purpose NLP models.", + "author": "Yizhong Wang; Swaroop Mishra; Pegah Alipoormolabashi; Yeganeh Kordi; Amirreza Mirzaei; Atharva Naik; Arjun Ashok; Arut Selvan Dhanasekaran; Anjana Arunkumar; David Stap; Eshaan Pathak; Giannis Karamanolakis; Haizhi Lai; Ishan Purohit; Ishani Mondal; Jacob Anderson; Kirby Kuznia; Krima Doshi; Kuntal Kumar Pal; Maitreya Patel; Mehrad Moradshahi; Mihir Parmar; Mirali Purohit; Neeraj Varshney; Phani Rohitha Kaza; Pulkit Verma; Ravsehaj Singh Puri; Rushang Karia; Savan Doshi; Shailaja Keyur Sampat; Siddhartha Mishra; Sujan Reddy A; Sumanta Patro; Tanay Dixit; Xudong Shen", + "authorids": "/y/yizhong-wang/; /s/swaroop-mishra/; /p/pegah-alipoormolabashi/; /y/yeganeh-kordi/; /a/amirreza-mirzaei/; /a/atharva-naik/; /a/arjun-ashok/; /a/arut-selvan-dhanasekaran/; /a/anjana-arunkumar/; /d/david-stap/; /e/eshaan-pathak/; /g/giannis-karamanolakis/; /h/haizhi-lai/; /i/ishan-purohit/; /i/ishani-mondal/; /j/jacob-anderson/; /k/kirby-kuznia/; /k/krima-doshi/; /k/kuntal-kumar-pal/; /m/maitreya-patel/; /m/mehrad-moradshahi/; /m/mihir-parmar/; /m/mirali-purohit/; /n/neeraj-varshney/; /p/phani-rohitha-kaza/; /p/pulkit-verma/; 
/r/ravsehaj-singh-puri/; /r/rushang-karia/; /s/savan-doshi/; /s/shailaja-keyur-sampat/; /s/siddhartha-mishra/; /s/sujan-reddy-a/; /s/sumanta-patro/; /t/tanay-dixit/; /x/xudong-shen/", + "bibtex": "@inproceedings{wang-etal-2022-super,\n title = \"Super-{N}atural{I}nstructions: Generalization via Declarative Instructions on 1600+ {NLP} Tasks\",\n author = \"Wang, Yizhong and\n Mishra, Swaroop and\n Alipoormolabashi, Pegah and\n Kordi, Yeganeh and\n Mirzaei, Amirreza and\n Naik, Atharva and\n Ashok, Arjun and\n Dhanasekaran, Arut Selvan and\n Arunkumar, Anjana and\n Stap, David and\n Pathak, Eshaan and\n Karamanolakis, Giannis and\n Lai, Haizhi and\n Purohit, Ishan and\n Mondal, Ishani and\n Anderson, Jacob and\n Kuznia, Kirby and\n Doshi, Krima and\n Pal, Kuntal Kumar and\n Patel, Maitreya and\n Moradshahi, Mehrad and\n Parmar, Mihir and\n Purohit, Mirali and\n Varshney, Neeraj and\n Kaza, Phani Rohitha and\n Verma, Pulkit and\n Puri, Ravsehaj Singh and\n Karia, Rushang and\n Doshi, Savan and\n Sampat, Shailaja Keyur and\n Mishra, Siddhartha and\n Reddy A, Sujan and\n Patro, Sumanta and\n Dixit, Tanay and\n Shen, Xudong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.340/\",\n doi = \"10.18653/v1/2022.emnlp-main.340\",\n pages = \"5085--5109\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.340.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.340/", + "pdf_size": 1618177, + "gs_citation": 582, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12793877315457456487&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 8, + "aff": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "aff_domain": 
";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;", + "github": "", + "project": "https://instructions.apps.allenai.org", + "author_num": 35 + }, + { + "id": "2022.emnlp-main.347", + "title": "Supervised Prototypical Contrastive Learning for Emotion Recognition in Conversation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Capturing emotions within a conversation plays an essential role in modern dialogue systems. However, the weak correlation between emotions and semantics brings many challenges to emotion recognition in conversation (ERC). Even semantically similar utterances, the emotion may vary drastically depending on contexts or speakers. In this paper, we propose a Supervised Prototypical Contrastive Learning (SPCL) loss for the ERC task. Leveraging the Prototypical Network, the SPCL targets at solving the imbalanced classification problem through contrastive learning and does not require a large batch size. Meanwhile, we design a difficulty measure function based on the distance between classes and introduce curriculum learning to alleviate the impact of extreme samples. We achieve state-of-the-art results on three widely used benchmarks. 
Further, we conduct analytical experiments to demonstrate the effectiveness of our proposed SPCL and curriculum learning strategy.", + "author": "Xiaohui Song; Longtao Huang; Hui Xue; Songlin Hu", + "authorids": "/x/xiaohui-song/; /l/longtao-huang/; /h/hui-xue/; /s/songlin-hu/", + "bibtex": "@inproceedings{song-etal-2022-supervised,\n title = \"Supervised Prototypical Contrastive Learning for Emotion Recognition in Conversation\",\n author = \"Song, Xiaohui and\n Huang, Longtao and\n Xue, Hui and\n Hu, Songlin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.347/\",\n doi = \"10.18653/v1/2022.emnlp-main.347\",\n pages = \"5197--5206\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.347.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.347/", + "pdf_size": 519288, + "gs_citation": 103, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7185291030079661322&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences + School of Cyber Security, University of Chinese Academy of Sciences; Alibaba Group; Alibaba Group; Institute of Information Engineering, Chinese Academy of Sciences + School of Cyber Security, University of Chinese Academy of Sciences", + "aff_domain": "iie.ac.cn;alibaba-inc.com;alibaba-inc.com;iie.ac.cn", + "email": "iie.ac.cn;alibaba-inc.com;alibaba-inc.com;iie.ac.cn", + "github": "https://github.com/caskcsg/SPCL", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;2;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Alibaba Group", + "aff_unique_dep": "Institute of 
Information Engineering;School of Cyber Security;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.alibaba.com", + "aff_unique_abbr": "CAS;UCAS;Alibaba", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.677", + "title": "Symptom Identification for Interpretable Detection of Multiple Mental Disorders on Social Media", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Mental disease detection (MDD) from social media has suffered from poor generalizability and interpretability, due to lack of symptom modeling. This paper introduces PsySym, the first annotated symptom identification corpus of multiple psychiatric disorders, to facilitate further research progress. PsySym is annotated according to a knowledge graph of the 38 symptom classes related to 7 mental diseases complied from established clinical manuals and scales, and a novel annotation framework for diversity and quality. Experiments show that symptom-assisted MDD enabled by PsySym can outperform strong pure-text baselines. 
We also exhibit the convincing MDD explanations provided by symptom predictions with case studies, and point to their further potential applications.", + "author": "Zhiling Zhang; Siyuan Chen; Mengyue Wu; Kenny Zhu", + "authorids": "/z/zhiling-zhang/; /s/siyuan-chen/; /m/mengyue-wu/; /k/kenny-zhu/", + "bibtex": "@inproceedings{zhang-etal-2022-symptom,\n title = \"Symptom Identification for Interpretable Detection of Multiple Mental Disorders on Social Media\",\n author = \"Zhang, Zhiling and\n Chen, Siyuan and\n Wu, Mengyue and\n Zhu, Kenny\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.677/\",\n doi = \"10.18653/v1/2022.emnlp-main.677\",\n pages = \"9970--9985\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.677.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.677/", + "pdf_size": 881407, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5406338393040224427&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Shanghai Jiao Tong University", + "aff_domain": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "email": "sjtu.edu.cn;sjtu.edu.cn;sjtu.edu.cn;cs.sjtu.edu.cn", + "github": "https://github.com/blmoistawinde/EMNLP22-PsySym", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Shanghai Jiao Tong University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.sjtu.edu.cn", + "aff_unique_abbr": "SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" 
+ }, + { + "id": "2022.emnlp-main.162", + "title": "SynGEC: Syntax-Enhanced Grammatical Error Correction with a Tailored GEC-Oriented Parser", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This work proposes a syntax-enhanced grammatical error correction (GEC) approach named SynGEC that effectively incorporates dependency syntactic information into the encoder part of GEC models. The key challenge for this idea is that off-the-shelf parsers are unreliable when processing ungrammatical sentences. To confront this challenge, we propose to build a tailored GEC-oriented parser (GOPar) using parallel GEC training data as a pivot. First, we design an extended syntax representation scheme that allows us to represent both grammatical errors and syntax in a unified tree structure. Then, we obtain parse trees of the source incorrect sentences by projecting trees of the target correct sentences. Finally, we train GOPar with such projected trees. For GEC, we employ the graph convolution network to encode source-side syntactic information produced by GOPar, and fuse them with the outputs of the Transformer encoder. Experiments on mainstream English and Chinese GEC datasets show that our proposed SynGEC approach consistently and substantially outperforms strong baselines and achieves competitive performance. 
Our code and data are all publicly available at https://github.com/HillZhang1999/SynGEC.", + "author": "Yue Zhang; Bo Zhang; Zhenghua Li; Zuyi Bao; Chen Li; Min Zhang", + "authorids": "/y/yue-zhang/; /b/bo-zhang/; /z/zhenghua-li/; /z/zuyi-bao/; /c/chen-li/; /m/min-zhang/", + "bibtex": "@inproceedings{zhang-etal-2022-syngec,\n title = \"{S}yn{GEC}: Syntax-Enhanced Grammatical Error Correction with a Tailored {GEC}-Oriented Parser\",\n author = \"Zhang, Yue and\n Zhang, Bo and\n Li, Zhenghua and\n Bao, Zuyi and\n Li, Chen and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.162/\",\n doi = \"10.18653/v1/2022.emnlp-main.162\",\n pages = \"2518--2531\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.162.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.162/", + "pdf_size": 586540, + "gs_citation": 54, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2604370963937669701&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University, China+DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University, China; DAMO Academy, Alibaba Group, China; DAMO Academy, Alibaba Group, China; Institute of Artificial Intelligence, School of Computer Science and Technology, Soochow University, China", + "aff_domain": "stu.suda.edu.cn;alibaba-inc.com;suda.edu.cn;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + "email": "stu.suda.edu.cn;alibaba-inc.com;suda.edu.cn;alibaba-inc.com;alibaba-inc.com;suda.edu.cn", + 
"github": "https://github.com/HillZhang1999/SynGEC", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;1;0;1;1;0", + "aff_unique_norm": "Soochow University;Alibaba Group", + "aff_unique_dep": "Institute of Artificial Intelligence, School of Computer Science and Technology;DAMO Academy", + "aff_unique_url": "http://www.soochow.edu.cn;https://www.alibaba.com", + "aff_unique_abbr": "Soochow U;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.452", + "title": "Synergy with Translation Artifacts for Training and Inference in Multilingual Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Translation has played a crucial role in improving the performance on multilingual tasks: (1) to generate the target language data from the source language data for training and (2) to generate the source language data from the target language data for inference. However, prior works have not considered the use of both translations simultaneously. This paper shows that combining them can synergize the results on various multilingual sentence classification tasks. We empirically find that translation artifacts stylized by translators are the main factor of the performance gain. Based on this analysis, we adopt two training methods, SupCon and MixUp, considering translation artifacts. Furthermore, we propose a cross-lingual fine-tuning algorithm called MUSC, which uses SupCon and MixUp jointly and improves the performance. 
Our code is available at https://github.com/jongwooko/MUSC.", + "author": "Jaehoon Oh; Jongwoo Ko; Se-Young Yun", + "authorids": "/j/jaehoon-oh/; /j/jongwoo-ko/; /s/se-young-yun/", + "bibtex": "@inproceedings{oh-etal-2022-synergy,\n title = \"Synergy with Translation Artifacts for Training and Inference in Multilingual Tasks\",\n author = \"Oh, Jaehoon and\n Ko, Jongwoo and\n Yun, Se-Young\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.452/\",\n doi = \"10.18653/v1/2022.emnlp-main.452\",\n pages = \"6747--6754\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.452.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.452/", + "pdf_size": 573690, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15253515791572377950&as_sdt=20000005&sciodt=0,21&hl=en", + "gs_version_total": 4, + "aff": "Graduate School of DS, KAIST; Graduate School of AI, KAIST; Graduate School of AI, KAIST", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr", + "github": "https://github.com/jongwooko/MUSC", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "KAIST", + "aff_unique_dep": "Graduate School of Data Science", + "aff_unique_url": "https://www.kaist.ac.kr", + "aff_unique_abbr": "KAIST", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.emnlp-main.272", + "title": "Syntactic Multi-view Learning for Open Information Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Open Information Extraction 
(OpenIE) aims to extract relational tuples from open-domain sentences. Traditional rule-based or statistical models were developed based on syntactic structure of sentence, identified by syntactic parsers. However, previous neural OpenIE models under-explored the useful syntactic information. In this paper, we model both constituency and dependency trees into word-level graphs, and enable neural OpenIE to learn from the syntactic structures. To better fuse heterogeneous information from the two graphs, we adopt multi-view learning to capture multiple relationships from them. Finally, the finetuned constituency and dependency representations are aggregated with sentential semantic representations for tuple generation. Experiments show that both constituency and dependency information, and the multi-view learning are effective.", + "author": "Kuicai Dong; Aixin Sun; Jung-Jae Kim; Xiaoli Li", + "authorids": "/k/kuicai-dong/; /a/aixin-sun/; /j/jung-jae-kim/; /x/xiaoli-li/", + "bibtex": "@inproceedings{dong-etal-2022-syntactic,\n title = \"Syntactic Multi-view Learning for Open Information Extraction\",\n author = \"Dong, Kuicai and\n Sun, Aixin and\n Kim, Jung-Jae and\n Li, Xiaoli\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.272/\",\n doi = \"10.18653/v1/2022.emnlp-main.272\",\n pages = \"4072--4083\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.272.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.272/", + "pdf_size": 849265, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13774420616815242092&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science 
and Engineering, Nanyang Technological University, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore; Institute for Infocomm Research, A*STAR, Singapore; School of Computer Science and Engineering, Nanyang Technological University, Singapore + Institute for Infocomm Research, A*STAR, Singapore + A*STAR Centre for Frontier AI Research, Singapore", + "aff_domain": "e.ntu.edu.sg;ntu.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "email": "e.ntu.edu.sg;ntu.edu.sg;i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "github": "https://github.com/daviddongkc/smile_oie", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0+1+2", + "aff_unique_norm": "Nanyang Technological University;Institute for Infocomm Research;A*STAR Centre for Frontier AI Research", + "aff_unique_dep": "School of Computer Science and Engineering;;", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.i2r.a-star.edu.sg;https://www.a-star.edu.sg", + "aff_unique_abbr": "NTU;I2R;A*STAR", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Singapore;", + "aff_country_unique_index": "0;0;0;0+0+0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.findings-emnlp.60", + "title": "Syntactic and Semantic Uniformity for Semantic Parsing and Task-Oriented Dialogue Systems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper proposes a data representation framework for semantic parsing and task-oriented dialogue systems, aiming to achieve a uniform representation for syntactically and semantically diverse machine-readable formats. Current NLP systems heavily rely on adapting pre-trained language models to specific tasks, and this approach has been proven effective for modeling natural language texts. However, little attention has been paid to the representation of machine-readable formats, such as database queries and dialogue states. We present a method for converting original machine-readable formats of
semantic parsing and task-oriented dialogue datasets into a syntactically and semantically uniform representation. We define a meta grammar for syntactically uniform representations and translate semantically equivalent functions into a uniform vocabulary. Empirical experiments on 13 datasets show that accuracy consistently improves over original formats, revealing the advantage of the proposed representation. Additionally, we show that the proposed representation allows for transfer learning across datasets.", + "author": "Bowen Chen; Yusuke Miyao", + "authorids": "/b/bowen-chen/; /y/yusuke-miyao/", + "bibtex": "@inproceedings{chen-miyao-2022-syntactic,\n title = \"Syntactic and Semantic Uniformity for Semantic Parsing and Task-Oriented Dialogue Systems\",\n author = \"Chen, Bowen and\n Miyao, Yusuke\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.60/\",\n doi = \"10.18653/v1/2022.findings-emnlp.60\",\n pages = \"855--867\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.60.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.60/", + "pdf_size": 1355945, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2895259654815211397&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Harbin Institute of Technology; The University of Tokyo", + "aff_domain": "gmail.com;is.s.u-tokyo.ac.jp", + "email": "gmail.com;is.s.u-tokyo.ac.jp", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "Harbin Institute of Technology;University of Tokyo", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.u-tokyo.ac.jp", + "aff_unique_abbr": 
"HIT;UTokyo", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;1", + "aff_country_unique": "China;Japan" + }, + { + "id": "2022.emnlp-main.401", + "title": "Syntactically Rich Discriminative Training: An Effective Method for Open Information Extraction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Open information extraction (OIE) is the task of extracting facts \u201c(Subject, Relation, Object)\u201d from natural language text. We propose several new methods for training neural OIE models in this paper. First, we propose a novel method for computing syntactically rich text embeddings using the structure of dependency trees. Second, we propose a new discriminative training approach to OIE in which tokens in the generated fact are classified as \u201creal\u201d or \u201cfake\u201d, i.e., those tokens that are in both the generated and gold tuples, and those that are only in the generated tuple but not in the gold tuple. We also address the issue of repetitive tokens in generated facts and improve the models\u2019 ability to generate implicit facts. Our approach reduces repetitive tokens by a factor of 23%. Finally, we present paraphrased versions of the CaRB, OIE2016, and LSOIE datasets, and show that the models\u2019 performance substantially improves when trained on augmented datasets. 
Our best model beats the SOTA of IMoJIE on the recent CaRB dataset, with an improvement of 39.63% in F1 score.", + "author": "Frank Mtumbuka; Thomas Lukasiewicz", + "authorids": "/f/frank-mtumbuka/; /t/thomas-lukasiewicz/", + "bibtex": "@inproceedings{mtumbuka-lukasiewicz-2022-syntactically,\n title = \"Syntactically Rich Discriminative Training: An Effective Method for Open Information Extraction\",\n author = \"Mtumbuka, Frank and\n Lukasiewicz, Thomas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.401/\",\n doi = \"10.18653/v1/2022.emnlp-main.401\",\n pages = \"5972--5987\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.401.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.401/", + "pdf_size": 409152, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3747698843403836332&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Cardiff University, Cardiff, UK+Department of Computer Science, University of Oxford, UK; Institute of Logic and Computation, TU Wien, Austria+Department of Computer Science, University of Oxford, UK", + "aff_domain": "cardiff.ac.uk;tuwien.ac.at", + "email": "cardiff.ac.uk;tuwien.ac.at", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2+1", + "aff_unique_norm": "Cardiff University;University of Oxford;TU Wien", + "aff_unique_dep": ";Department of Computer Science;Institute of Logic and Computation", + "aff_unique_url": "https://www.cardiff.ac.uk;https://www.ox.ac.uk;https://www.tuwien.ac.at", + "aff_unique_abbr": "Cardiff;Oxford;TU Wien", + "aff_campus_unique_index": "0;", + "aff_campus_unique": "Cardiff;", + 
"aff_country_unique_index": "0+0;1+0", + "aff_country_unique": "United Kingdom;Austria" + }, + { + "id": "2022.findings-emnlp.465", + "title": "Syntactically Robust Training on Partially-Observed Data for Open Information Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Open Information Extraction models have shown promising results with sufficient supervision. However, these models face a fundamental challenge that the syntactic distribution of training data is partially observable in comparison to the real world. In this paper, we propose a syntactically robust training framework that enables models to be trained on a syntactic-abundant distribution based on diverse paraphrase generation. To tackle the intrinsic problem of knowledge deformation of paraphrasing, two algorithms based on semantic similarity matching and syntactic tree walking are used to restore the expressionally transformed knowledge. The training framework can be generally applied to other syntactic partial observable domains. Based on the proposed framework, we build a new evaluation set called CaRB-AutoPara, a syntactically diverse dataset consistent with the real-world setting for validating the robustness of the models. 
Experiments including a thorough analysis show that the performance of the model degrades with the increase of the difference in syntactic distribution, while our framework gives a robust boundary.", + "author": "Ji Qi; Yuxiang Chen; Lei Hou; Juanzi Li; Bin Xu", + "authorids": "/j/ji-qi/; /y/yuxiang-chen/; /l/lei-hou/; /j/juanzi-li/; /b/bin-xu/", + "bibtex": "@inproceedings{qi-etal-2022-syntactically,\n title = \"Syntactically Robust Training on Partially-Observed Data for Open Information Extraction\",\n author = \"Qi, Ji and\n Chen, Yuxiang and\n Hou, Lei and\n Li, Juanzi and\n Xu, Bin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.465/\",\n doi = \"10.18653/v1/2022.findings-emnlp.465\",\n pages = \"6245--6257\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.465.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.465/", + "pdf_size": 1013553, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15104219107083207447&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, 100084, China; University of California, San Diego; Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, 100084, China; Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, 100084, China", + "aff_domain": "mails.tsinghua.edu.cn;ucsd.edu; ; ;tsinghua.edu.cn", + "email": "mails.tsinghua.edu.cn;ucsd.edu; ; ;tsinghua.edu.cn", + "github": "https://github.com/qijimrc/RobustOIE", + 
"project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Tsinghua University;University of California, San Diego", + "aff_unique_dep": "Department of Computer Science and Technology;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://www.ucsd.edu", + "aff_unique_abbr": "THU;UCSD", + "aff_campus_unique_index": "0;1;0;0;0", + "aff_campus_unique": "Beijing;San Diego", + "aff_country_unique_index": "0;1;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.173", + "title": "Syntax-guided Localized Self-attention by Constituency Syntactic Distance", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent works have revealed that Transformers are implicitly learning the syntactic information in its lower layers from data, albeit is highly dependent on the quality and scale of the training data. However, learning syntactic information from data is not necessary if we can leverage an external syntactic parser, which provides better parsing quality with well-defined syntactic structures. This could potentially improve Transformer\u2019s performance and sample efficiency. In this work, we propose a syntax-guided localized self-attention for Transformer that allows directly incorporating grammar structures from an external constituency parser. It prohibits the attention mechanism to overweight the grammatically distant tokens over close ones. 
Experimental results show that our model could consistently improve translation performance on a variety of machine translation datasets, ranging from small to large dataset sizes, and with different source languages.", + "author": "Shengyuan Hou; Jushi Kai; Haotian Xue; Bingyu Zhu; Bo Yuan; Longtao Huang; Xinbing Wang; Zhouhan Lin", + "authorids": "/s/shengyuan-hou/; /j/jushi-kai/; /h/haotian-xue/; /b/bingyu-zhu/; /b/bo-yuan/; /l/longtao-huang/; /x/xinbing-wang/; /z/zhouhan-lin/", + "bibtex": "@inproceedings{hou-etal-2022-syntax,\n title = \"Syntax-guided Localized Self-attention by Constituency Syntactic Distance\",\n author = \"Hou, Shengyuan and\n Kai, Jushi and\n Xue, Haotian and\n Zhu, Bingyu and\n Yuan, Bo and\n Huang, Longtao and\n Wang, Xinbing and\n Lin, Zhouhan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.173/\",\n doi = \"10.18653/v1/2022.findings-emnlp.173\",\n pages = \"2334--2341\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.173.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.173/", + "pdf_size": 412523, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4774418313281075385&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/LUMIA-Group/distance_transformerexample", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.42", + "title": "System 1 + System 2 = Better World: Neural-Symbolic Chain of Logic Reasoning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Logical reasoning is a challenge for many current NLP neural network 
models since it requires more than the ability of learning informative representations from data. Inspired by the Dual Process Theory in cognitive science \u2014 which proposes that human cognition process involves two stages: an intuitive, unconscious and fast process relying on perception called System 1, and a logical, conscious and slow process performing complex reasoning called System 2 \u2014 we leverage neural logic reasoning (System 2) on top of the representation learning models (System 1), which conducts explicit neural-based differentiable logical reasoning on top of the representations learned by the base neural models. Based on experiments on the commonsense knowledge graph completion task, we show that the two-system architecture always improves from its System 1 model alone. Experiments also show that both the rule-driven logical regularizer and the data-driven value regularizer are important and the performance improvement is marginal without the two regularizers, which indicates that learning from both logical prior and training data is important for reasoning tasks.", + "author": "Wenyue Hua; Yongfeng Zhang", + "authorids": "/w/wenyue-hua/; /y/yongfeng-zhang/", + "bibtex": "@inproceedings{hua-zhang-2022-system,\n title = \"System 1 + System 2 = Better World: Neural-Symbolic Chain of Logic Reasoning\",\n author = \"Hua, Wenyue and\n Zhang, Yongfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.42/\",\n doi = \"10.18653/v1/2022.findings-emnlp.42\",\n pages = \"601--612\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.42.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.42/", + "pdf_size": 942895, + "gs_citation": 
15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15826605902592891137&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 8, + "aff": "Department of Computer Science, Rutgers University, New Brunswick, NJ, US; Department of Computer Science, Rutgers University, New Brunswick, NJ, US", + "aff_domain": "rutgers.edu;rutgers.edu", + "email": "rutgers.edu;rutgers.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Rutgers University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.rutgers.edu", + "aff_unique_abbr": "Rutgers", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "New Brunswick", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.50", + "title": "Systematicity in GPT-3\u2019s Interpretation of Novel English Noun Compounds", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Levin et al. (2019) show experimentally that the interpretations of novel English noun compounds (e.g., stew skillet), while not fully compositional, are highly predictable based on whether the modifier and head refer to artifacts or natural kinds. Is the large language model GPT-3 governed by the same interpretive principles? To address this question, we first compare Levin et al.\u2019s experimental data with GPT-3 generations, finding a high degree of similarity. However, this evidence is consistent with GPT-3 reasoning only about specific lexical items rather than the more abstract conceptual categories of Levin et al.\u2019s theory. To probe more deeply, we construct prompts that require the relevant kind of conceptual reasoning. Here, we fail to find convincing evidence that GPT-3 is reasoning about more than just individual lexical items. 
These results highlight the importance of controlling for low-level distributional regularities when assessing whether a large language model latently encodes a deeper theory.", + "author": "Siyan Li; Riley Carlson; Christopher Potts", + "authorids": "/s/siyan-li/; /r/riley-carlson/; /c/christopher-potts/", + "bibtex": "@inproceedings{li-etal-2022-systematicity,\n title = \"Systematicity in {GPT}-3`s Interpretation of Novel {E}nglish Noun Compounds\",\n author = \"Li, Siyan and\n Carlson, Riley and\n Potts, Christopher\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.50/\",\n doi = \"10.18653/v1/2022.findings-emnlp.50\",\n pages = \"717--728\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.50.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.50/", + "pdf_size": 254854, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10994386912551738563&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Stanford University; Stanford University; Stanford University", + "aff_domain": "stanford.edu;stanford.edu;stanford.edu", + "email": "stanford.edu;stanford.edu;stanford.edu", + "github": "https://github.com/siyan-sylvia-li/systematicity_gpt3/", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.391", + "title": "T-Modules: Translation Modules for 
Zero-Shot Cross-Modal Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present a new approach to perform zero-shot cross-modal transfer between speech and text for translation tasks. Multilingual speech and text are encoded in a joint fixed-size representation space. Then, we compare different approaches to decode these multimodal and multilingual fixed-size representations, enabling zero-shot translation between languages and modalities. All our models are trained without the need of cross-modal labeled translation data.Despite a fixed-size representation, we achieve very competitive results on several text and speech translation tasks. In particular, we significantly improve the state-of-the-art for zero-shot speech translation on Must-C. Incorporating a speech decoder in our framework, we introduce the first results for zero-shot direct speech-to-speech and text-to-speech translation.", + "author": "Paul-Ambroise Duquenne; Hongyu Gong; Beno\u00eet Sagot; Holger Schwenk", + "authorids": "/p/paul-ambroise-duquenne/; /h/hongyu-gong/; /b/benoit-sagot/; /h/holger-schwenk/", + "bibtex": "@inproceedings{duquenne-etal-2022-modules,\n title = \"{T}-Modules: Translation Modules for Zero-Shot Cross-Modal Machine Translation\",\n author = \"Duquenne, Paul-Ambroise and\n Gong, Hongyu and\n Sagot, Beno{\\^i}t and\n Schwenk, Holger\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.391/\",\n doi = \"10.18653/v1/2022.emnlp-main.391\",\n pages = \"5794--5806\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.391.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.391/", + "pdf_size": 795617, + 
"gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7728160751872650356&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Meta AI & Inria; Meta AI; Inria; Meta AI", + "aff_domain": "fb.com;fb.com;inria.fr;fb.com", + "email": "fb.com;fb.com;inria.fr;fb.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Meta AI;Meta Platforms, Inc.;Inria", + "aff_unique_dep": "Meta AI;Meta AI;", + "aff_unique_url": "https://meta.ai;https://meta.com;https://www.inria.fr", + "aff_unique_abbr": "Meta AI;Meta;Inria", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;France" + }, + { + "id": "2022.emnlp-main.602", + "title": "T-STAR: Truthful Style Transfer using AMR Graph as Intermediate Representation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Unavailability of parallel corpora for training text style transfer (TST) models is a very challenging yet common scenario. Also, TST models implicitly need to preserve the content while transforming a source sentence into the target style. To tackle these problems, an intermediate representation is often constructed that is devoid of style while still preserving the meaning of the source sentence. In this work, we study the usefulness of Abstract Meaning Representation (AMR) graph as the intermediate style agnostic representation. We posit that semantic notations like AMR are a natural choice for an intermediate representation. Hence, we propose T-STAR: a model comprising of two components, text-to-AMR encoder and a AMR-to-text decoder. We propose several modeling improvements to enhance the style agnosticity of the generated AMR. To the best of our knowledge, T-STAR is the first work that uses AMR as an intermediate representation for TST. 
With thorough experimental evaluation we show T-STAR significantly outperforms state of the art techniques by achieving on an average 15.2% higher content preservation with negligible loss (~3%) in style accuracy. Through detailed human evaluation with 90,000 ratings, we also show that T-STAR has upto 50% lesser hallucinations compared to state of the art TST models.", + "author": "Anubhav Jangra; Preksha Nema; Aravindan Raghuveer", + "authorids": "/a/anubhav-jangra/; /p/preksha-nema/; /a/aravindan-raghuveer/", + "bibtex": "@inproceedings{jangra-etal-2022-star,\n title = \"{T}-{STAR}: Truthful Style Transfer using {AMR} Graph as Intermediate Representation\",\n author = \"Jangra, Anubhav and\n Nema, Preksha and\n Raghuveer, Aravindan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.602/\",\n doi = \"10.18653/v1/2022.emnlp-main.602\",\n pages = \"8805--8825\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.602.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.602/", + "pdf_size": 802883, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12339908774920002771&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Google Research, India; Google Research, India; Google Research, India", + "aff_domain": "google.com;google.com;google.com", + "email": "google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "India", 
+ "aff_country_unique_index": "0;0;0", + "aff_country_unique": "India" + }, + { + "id": "2022.emnlp-main.369", + "title": "TABS: Efficient Textual Adversarial Attack for Pre-trained NL Code Model Using Semantic Beam Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As pre-trained models have shown successful performance in program language processing as well as natural language processing, adversarial attacks on these models also attract attention.However, previous works on black-box adversarial attacks generated adversarial examples in a very inefficient way with simple greedy search. They also failed to find out better adversarial examples because it was hard to reduce the search space without performance loss.In this paper, we propose TABS, an efficient beam search black-box adversarial attack method. We adopt beam search to find out better adversarial examples, and contextual semantic filtering to effectively reduce the search space. Contextual semantic filtering reduces the number of candidate adversarial words considering the surrounding context and the semantic similarity.Our proposed method shows good performance in terms of attack success rate, the number of queries, and semantic similarity in attacking models for two tasks: NL code search classification and retrieval tasks.", + "author": "YunSeok Choi; Hyojun Kim; Jee-Hyong Lee", + "authorids": "/y/yunseok-choi/; /h/hyojun-kim/; /j/jee-hyong-lee/", + "bibtex": "@inproceedings{choi-etal-2022-tabs,\n title = \"{TABS}: Efficient Textual Adversarial Attack for Pre-trained {NL} Code Model Using Semantic Beam Search\",\n author = \"Choi, YunSeok and\n Kim, Hyojun and\n Lee, Jee-Hyong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for 
Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.369/\",\n doi = \"10.18653/v1/2022.emnlp-main.369\",\n pages = \"5490--5498\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.369.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.369/", + "pdf_size": 268969, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15110089312429055956&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "College of Computing and Informatics, Sungkyunkwan University; College of Computing and Informatics, Sungkyunkwan University; College of Computing and Informatics, Sungkyunkwan University", + "aff_domain": "skku.edu;skku.edu;skku.edu", + "email": "skku.edu;skku.edu;skku.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Sungkyunkwan University", + "aff_unique_dep": "College of Computing and Informatics", + "aff_unique_url": "https://www.sungkyunkwan.ac.kr", + "aff_unique_abbr": "SKKU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.183", + "title": "TAPE: Assessing Few-shot Russian Language Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent advances in zero-shot and few-shot learning have shown promise for a scope of research and practical purposes. However, this fast-growing area lacks standardized evaluation suites for non-English languages, hindering progress outside the Anglo-centric paradigm. To address this line of research, we propose TAPE (Text Attack and Perturbation Evaluation), a novel benchmark that includes six more complex NLU tasks for Russian, covering multi-hop reasoning, ethical concepts, logic and commonsense knowledge. 
The TAPE\u2019s design focuses on systematic zero-shot and few-shot NLU evaluation: (i) linguistic-oriented adversarial attacks and perturbations for analyzing robustness, and (ii) subpopulations for nuanced interpretation. The detailed analysis of testing the autoregressive baselines indicates that simple spelling-based perturbations affect the performance the most, while paraphrasing the input has a more negligible effect. At the same time, the results demonstrate a significant gap between the neural and human baselines for most tasks. We publicly release TAPE (https://tape-benchmark.com) to foster research on robust LMs that can generalize to new tasks when little to no supervision is available.", + "author": "Ekaterina Taktasheva; Alena Fenogenova; Denis Shevelev; Nadezhda Katricheva; Maria Tikhonova; Albina Akhmetgareeva; Oleg Zinkevich; Anastasiia Bashmakova; Svetlana Iordanskaia; Valentina Kurenshchikova; Alena Spiridonova; Ekaterina Artemova; Tatiana Shavrina; Vladislav Mikhailov", + "authorids": "/e/ekaterina-taktasheva/; /a/alena-fenogenova/; /d/denis-shevelev/; /n/nadezhda-katricheva/; /m/maria-tikhonova/; /a/albina-akhmetgareeva/; /o/oleg-zinkevich/; /a/anastasiia-bashmakova/; /s/svetlana-iordanskaia/; /v/valentina-kurenshchikova/; /a/alena-spiridonova/; /e/ekaterina-artemova/; /t/tatiana-shavrina/; /v/vladislav-mikhailov/", + "bibtex": "@inproceedings{taktasheva-etal-2022-tape,\n title = \"{TAPE}: Assessing Few-shot {R}ussian Language Understanding\",\n author = \"Taktasheva, Ekaterina and\n Fenogenova, Alena and\n Shevelev, Denis and\n Katricheva, Nadezhda and\n Tikhonova, Maria and\n Akhmetgareeva, Albina and\n Zinkevich, Oleg and\n Bashmakova, Anastasiia and\n Iordanskaia, Svetlana and\n Kurenshchikova, Valentina and\n Spiridonova, Alena and\n Artemova, Ekaterina and\n Shavrina, Tatiana and\n Mikhailov, Vladislav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational 
Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.183/\",\n doi = \"10.18653/v1/2022.findings-emnlp.183\",\n pages = \"2472--2497\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.183.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.183/", + "pdf_size": 3011186, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16133468332774029369&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "SberDevices+HSE University; SberDevices+Artificial Intelligence Research Institute; SberDevices; SberDevices; SberDevices; SberDevices+HSE University; SberDevices; HSE University; HSE University; HSE University; HSE University; HSE University; Huawei Noah\u2019s Ark lab+CIS LMU Munich; SberDevices", + "aff_domain": "gmail.com; ; ; ; ; ; ; ; ; ; ; ; ; ", + "email": "gmail.com; ; ; ; ; ; ; ; ; ; ; ; ; ", + "github": "", + "project": "tape-benchmark.com", + "author_num": 14, + "aff_unique_index": "0+1;0+2;0;0;0;0+1;0;1;1;1;1;1;3+4;0", + "aff_unique_norm": "SberDevices;Higher School of Economics;Artificial Intelligence Research Institute;Huawei;Ludwig Maximilian University of Munich", + "aff_unique_dep": ";;;Noah\u2019s Ark lab;Computer and Information Science Department", + "aff_unique_url": "https://sberdevices.ru;https://hse.ru;;https://www.huawei.com;https://www.lmu.de", + "aff_unique_abbr": "SberDevices;HSE;;Huawei;LMU", + "aff_campus_unique_index": ";;;1", + "aff_campus_unique": ";Munich", + "aff_country_unique_index": "0+0;0+1;0;0;0;0+0;0;0;0;0;0;0;2+3;0", + "aff_country_unique": "Russia;United States;China;Germany" + }, + { + "id": "2022.emnlp-main.821", + "title": "TASA: Deceiving Question Answering Models by Twin Answer Sentences Attack", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present Twin Answer 
Sentences Attack (TASA), an adversarial attack method for question answering (QA) models that produces fluent and grammatical adversarial contexts while maintaining gold answers. Despite phenomenal progress on general adversarial attacks, few works have investigated the vulnerability and attack specifically for QA models. In this work, we first explore the biases in the existing models and discover that they mainly rely on keyword matching between the question and context, and ignore the relevant contextual relations for answer prediction.Based on two biases above, TASA attacks the target model in two folds: (1) lowering the model\u2019s confidence on the gold answer with a perturbed answer sentence; (2) misguiding the model towards a wrong answer with a distracting answer sentence. Equipped with designed beam search and filtering methods, TASA can generate more effective attacks than existing textual attack methods while sustaining the quality of contexts, in extensive experiments on five QA datasets and human evaluations.", + "author": "Yu Cao; Dianqi Li; Meng Fang; Tianyi Zhou; Jun Gao; Yibing Zhan; Dacheng Tao", + "authorids": "/y/yu-cao/; /d/dianqi-li/; /m/meng-fang/; /t/tianyi-zhou/; /j/jun-gao/; /y/yibing-zhan/; /d/dacheng-tao/", + "bibtex": "@inproceedings{cao-etal-2022-tasa,\n title = \"{TASA}: Deceiving Question Answering Models by Twin Answer Sentences Attack\",\n author = \"Cao, Yu and\n Li, Dianqi and\n Fang, Meng and\n Zhou, Tianyi and\n Gao, Jun and\n Zhan, Yibing and\n Tao, Dacheng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.821/\",\n doi = \"10.18653/v1/2022.emnlp-main.821\",\n pages = \"11975--11992\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.821.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.821/", + "pdf_size": 655525, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12244837590191108191&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science, The University of Sydney; University of Washington; University of Liverpool; University of Maryland; Harbin Institute of Technology, Shenzhen; JD Explore Academy; School of Computer Science, The University of Sydney", + "aff_domain": "uni.sydney.edu.au;uw.edu;liverpool.ac.uk;umiacs.umd.edu;stu.hit.edu.cn;jd.com;gmail.com", + "email": "uni.sydney.edu.au;uw.edu;liverpool.ac.uk;umiacs.umd.edu;stu.hit.edu.cn;jd.com;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;4;5;0", + "aff_unique_norm": "The University of Sydney;University of Washington;University of Liverpool;University of Maryland;Harbin Institute of Technology;JD Explore Academy", + "aff_unique_dep": "School of Computer Science;;;;;", + "aff_unique_url": "https://www.sydney.edu.au;https://www.washington.edu;https://www.liverpool.ac.uk;https://www/umd.edu;http://en.hhit.edu.cn/;", + "aff_unique_abbr": "USYD;UW;Liv Uni;UMD;HIT;", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Sydney;;Shenzhen", + "aff_country_unique_index": "0;1;2;1;3;0", + "aff_country_unique": "Australia;United States;United Kingdom;China;" + }, + { + "id": "2022.emnlp-main.555", + "title": "TIARA: Multi-grained Retrieval for Robust Question Answering over Large Knowledge Base", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models (PLMs) have shown their effectiveness in multiple scenarios. However, KBQA remains challenging, especially regarding coverage and generalization settings. 
This is due to two main factors: i) understanding the semantics of both questions and relevant knowledge from the KB; ii) generating executable logical forms with both semantic and syntactic correctness. In this paper, we present a new KBQA model, TIARA, which addresses those issues by applying multi-grained retrieval to help the PLM focus on the most relevant KB context, viz., entities, exemplary logical forms, and schema items. Moreover, constrained decoding is used to control the output space and reduce generation errors. Experiments over important benchmarks demonstrate the effectiveness of our approach. TIARA outperforms previous SOTA, including those using PLMs or oracle entity annotations, by at least 4.1 and 1.1 F1 points on GrailQA and WebQuestionsSP, respectively. Specifically on GrailQA, TIARA outperforms previous models in all categories, with an improvement of 4.7 F1 points in zero-shot generalization.", + "author": "Yiheng Shu; Zhiwei Yu; Yuhan Li; B\u00f6rje Karlsson; Tingting Ma; Yuzhong Qu; Chin-Yew Lin", + "authorids": "/y/yiheng-shu/; /z/zhiwei-yu/; /y/yuhan-li/; /b/borje-karlsson/; /t/tingting-ma/; /y/yuzhong-qu/; /c/chin-yew-lin/", + "bibtex": "@inproceedings{shu-etal-2022-tiara,\n title = \"{TIARA}: Multi-grained Retrieval for Robust Question Answering over Large Knowledge Base\",\n author = {Shu, Yiheng and\n Yu, Zhiwei and\n Li, Yuhan and\n Karlsson, B{\\\"o}rje and\n Ma, Tingting and\n Qu, Yuzhong and\n Lin, Chin-Yew},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.555/\",\n doi = \"10.18653/v1/2022.emnlp-main.555\",\n pages = \"8108--8121\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.555.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-main.555/", + "pdf_size": 499497, + "gs_citation": 91, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1215080707016714824&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "State Key Laboratory of Novel Software Technology, Nanjing University, China; Microsoft Research; Nankai University; Harbin Institute of Technology; State Key Laboratory of Novel Software Technology, Nanjing University, China; Microsoft Research; Microsoft Research", + "aff_domain": "smail.nju.edu.cn;microsoft.com;mail.nankai.edu.cn;microsoft.com;hit.edu.cn;nju.edu.cn;microsoft.com", + "email": "smail.nju.edu.cn;microsoft.com;mail.nankai.edu.cn;microsoft.com;hit.edu.cn;nju.edu.cn;microsoft.com", + "github": "https://github.com/microsoft/KC/tree/main/papers/TIARA", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;0;1;1", + "aff_unique_norm": "Nanjing University;Microsoft Corporation;Nankai University;Harbin Institute of Technology", + "aff_unique_dep": "State Key Laboratory of Novel Software Technology;Microsoft Research;;", + "aff_unique_url": "http://www.nju.edu.cn;https://www.microsoft.com/en-us/research;http://www.nankai.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "Nanjing U;MSR;NKU;HIT", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0;1;0;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.301", + "title": "TINA: Textual Inference with Negation Augmentation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer-based language models achieve state-of-the-art results on several natural language processing tasks. One of these is textual entailment, i.e., the task of determining whether a premise logically entails a hypothesis. However, the models perform poorly on this task when the examples contain negations. 
In this paper, we propose a new definition of textual entailment that captures also negation. This allows us to develop TINA (Textual Inference with Negation Augmentation), a principled technique for negated data augmentation that can be combined with the unlikelihood loss function.Our experiments with different transformer-based models show that our method can significantly improve the performance of the models on textual entailment datasets with negation \u2013 without sacrificing performance on datasets without negation.", + "author": "Chadi Helwe; Simon Coumes; Chlo\u00e9 Clavel; Fabian Suchanek", + "authorids": "/c/chadi-helwe/; /s/simon-coumes/; /c/chloe-clavel/; /f/fabian-suchanek/", + "bibtex": "@inproceedings{helwe-etal-2022-tina,\n title = \"{TINA}: Textual Inference with Negation Augmentation\",\n author = \"Helwe, Chadi and\n Coumes, Simon and\n Clavel, Chlo{\\'e} and\n Suchanek, Fabian\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.301/\",\n doi = \"10.18653/v1/2022.findings-emnlp.301\",\n pages = \"4086--4099\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.301.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.301/", + "pdf_size": 300291, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9293990591891094291&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 18, + "aff": "T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France; T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France; T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France; T\u00e9l\u00e9com Paris, Institut Polytechnique de Paris, France", + "aff_domain": 
"telecom-paris.fr;telecom-paris.fr;telecom-paris.fr;telecom-paris.fr", + "email": "telecom-paris.fr;telecom-paris.fr;telecom-paris.fr;telecom-paris.fr", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "T\u00e9l\u00e9com Paris", + "aff_unique_dep": "", + "aff_unique_url": "https://www.telecom-paris.fr", + "aff_unique_abbr": "T\u00e9l\u00e9com Paris", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.emnlp-main.273", + "title": "TRIPS: Efficient Vision-and-Language Pre-training with Text-Relevant Image Patch Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Vision Transformers (ViTs) have been widely used in large-scale Vision and Language Pre-training (VLP) models. Though previous VLP works have proved the effectiveness of ViTs, they still suffer from computational efficiency brought by the long visual sequence. To tackle this problem, in this paper, we propose an efficient vision-and-language pre-training model with Text-Relevant Image Patch Selection, namely TRIPS, which reduces the visual sequence progressively with a text-guided patch-selection layer in the visual backbone for efficient training and inference. The patch-selection layer can dynamically compute text-dependent visual attention to identify the attentive image tokens with text guidance and fuse inattentive ones in an end-to-end manner. Meanwhile, TRIPS does not introduce extra parameters to ViTs. 
Experimental results on a variety of popular benchmark datasets demonstrate that TRIPS gain a speedup of 40% over previous similar VLP models, yet with competitive or better downstream task performance.", + "author": "Chaoya Jiang; Haiyang Xu; Chenliang Li; Ming Yan; Wei Ye; Shikun Zhang; Bin Bi; Songfang Huang", + "authorids": "/c/chaoya-jiang/; /h/haiyang-xu/; /c/chenliang-li/; /m/ming-yan/; /w/wei-ye/; /s/shikun-zhang/; /b/bin-bi/; /s/songfang-huang/", + "bibtex": "@inproceedings{jiang-etal-2022-trips,\n title = \"{TRIPS}: Efficient Vision-and-Language Pre-training with Text-Relevant Image Patch Selection\",\n author = \"Jiang, Chaoya and\n Xu, Haiyang and\n Li, Chenliang and\n Yan, Ming and\n Ye, Wei and\n Zhang, Shikun and\n Bi, Bin and\n Huang, Songfang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.273/\",\n doi = \"10.18653/v1/2022.emnlp-main.273\",\n pages = \"4084--4096\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.273.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.273/", + "pdf_size": 1171199, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14370827574954257164&as_sdt=5,39&sciodt=0,39&hl=en", + "gs_version_total": 0, + "aff": "National Engineering Research Center for Software Engineering, Peking University+DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; National Engineering Research Center for Software Engineering, Peking University; National Engineering Research Center for Software Engineering, Peking University; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group", + "aff_domain": 
"pku.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;pku.edu.cn;pku.edu.cn;alibaba-inc.com;alibaba-inc.com", + "email": "pku.edu.cn;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;pku.edu.cn;pku.edu.cn;alibaba-inc.com;alibaba-inc.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;1;1;1;0;0;1;1", + "aff_unique_norm": "Peking University;Alibaba Group", + "aff_unique_dep": "National Engineering Research Center for Software Engineering;DAMO Academy", + "aff_unique_url": "http://www.pku.edu.cn;https://www.alibaba-group.com", + "aff_unique_abbr": "PKU;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.68", + "title": "TSGP: Two-Stage Generative Prompting for Unsupervised Commonsense Question Answering", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Without training on labeled task data, unsupervised commonsense question answering seems challenging since it requires commonsense knowledge beyond the context of questions. Previous methods typically retrieved from traditional knowledge bases or used pre-trained language models (PrLMs) to generate fixed types of knowledge, which have poor generalization ability.In this paper, we aim to address the above limitation by leveraging the implicit knowledge stored in PrLMs and propose a two-stage prompt-based unsupervised commonsense question answering framework (TSGP). We first use knowledge generation prompts to generate the knowledge required for questions with unlimited types and possible candidate answers independent of specified choices. Then, we further utilize answer generation prompts to generate possible candidate answers independent of specified choices. 
Experimental results and analysis on three different commonsense reasoning tasks, CommonsenseQA, OpenBookQA, and SocialIQA, demonstrate that TSGP significantly improves the reasoning ability of language models in unsupervised settings.", + "author": "Yueqing Sun; Yu Zhang; Le Qi; Qi Shi", + "authorids": "/y/yueqing-sun/; /y/yu-zhang/; /l/le-qi/; /q/qi-shi/", + "bibtex": "@inproceedings{sun-etal-2022-tsgp,\n title = \"{TSGP}: Two-Stage Generative Prompting for Unsupervised Commonsense Question Answering\",\n author = \"Sun, Yueqing and\n Zhang, Yu and\n Qi, Le and\n Shi, Qi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.68/\",\n doi = \"10.18653/v1/2022.findings-emnlp.68\",\n pages = \"968--980\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.68.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.68/", + "pdf_size": 479965, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11516266884602748442&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China; Research Center for Social Computing and Information Retrieval, Harbin Institute of Technology, Harbin, China", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "email": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn", + "github": "https://github.com/Yueqing-Sun/TSGP", + 
"project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Harbin Institute of Technology", + "aff_unique_dep": "Research Center for Social Computing and Information Retrieval", + "aff_unique_url": "http://www.hit.edu.cn/", + "aff_unique_abbr": "HIT", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Harbin", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.145", + "title": "TaCube: Pre-computing Data Cubes for Answering Numerical-Reasoning Questions over Tabular Data", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing auto-regressive pre-trained language models (PLMs) like T5 and BART, have been well applied to table question answering by UNIFIEDSKG and TAPEX, respectively, and demonstrated state-of-the-art results on multiple benchmarks. However, auto-regressive PLMs are challenged by recent emerging numerical reasoning datasets, such as TAT-QA, due to the error-prone implicit calculation. In this paper, we present TaCube, to pre-compute aggregation/arithmetic results for the table in advance, so that they are handy and readily available for PLMs to answer numerical reasoning questions. TaCube systematically and comprehensively covers a collection of computational operations over table segments. By simply concatenating TaCube to the input sequence of PLMs, it shows significant experimental effectiveness. TaCube promotes the F1 score from 49.6% to 66.2% on TAT-QA and achieves new state-of-the-art results on WikiTQ (59.6% denotation accuracy). TaCube\u2019s improvements on numerical reasoning cases are even more notable: on TAT-QA, TaCube promotes the exact match accuracy of BART-large by 39.6% on sum, 52.5% on average, 36.6% on substraction, and 22.2% on division. 
We believe that TaCube is a general and portable pre-computation solution that can be potentially integrated to various numerical reasoning frameworks", + "author": "Fan Zhou; Mengkang Hu; Haoyu Dong; Zhoujun Cheng; Fan Cheng; Shi Han; Dongmei Zhang", + "authorids": "/f/fan-zhou/; /m/mengkang-hu/; /h/haoyu-dong/; /z/zhoujun-cheng/; /f/fan-cheng/; /s/shi-han/; /d/dongmei-zhang/", + "bibtex": "@inproceedings{zhou-etal-2022-tacube,\n title = \"{T}a{C}ube: Pre-computing Data Cubes for Answering Numerical-Reasoning Questions over Tabular Data\",\n author = \"Zhou, Fan and\n Hu, Mengkang and\n Dong, Haoyu and\n Cheng, Zhoujun and\n Cheng, Fan and\n Han, Shi and\n Zhang, Dongmei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.145/\",\n doi = \"10.18653/v1/2022.emnlp-main.145\",\n pages = \"2278--2291\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.145.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.145/", + "pdf_size": 1010617, + "gs_citation": 25, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4873680653944136383&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Shanghai Jiao Tong University; Harbin Institute of Technology; Microsoft Research; Shanghai Jiao Tong University; Shanghai Jiao Tong University; Microsoft Research; Microsoft Research", + "aff_domain": "sjtu.edu.cn;stu.hit.edu.cn;microsoft.com;sjtu.edu.cn;sjtu.edu.cn;microsoft.com;microsoft.com", + "email": "sjtu.edu.cn;stu.hit.edu.cn;microsoft.com;sjtu.edu.cn;sjtu.edu.cn;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/TaCube", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;0;0;2;2", + 
"aff_unique_norm": "Shanghai Jiao Tong University;Harbin Institute of Technology;Microsoft Corporation", + "aff_unique_dep": ";;Microsoft Research", + "aff_unique_url": "https://www.sjtu.edu.cn;http://www.hit.edu.cn/;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "SJTU;HIT;MSR", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Harbin", + "aff_country_unique_index": "0;0;1;0;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.503", + "title": "Table-To-Text generation and pre-training with TabT5", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Encoder-only transformer models have been successfully applied to different table understanding tasks, as in TAPAS. A major limitation of these architectures is that they are constrained to classification-like tasks such as cell selection or entailment detection. We present TabT5, an encoder-decoder model that generates natural language text based on tables and textual inputs. TabT5 overcomes the encoder-only limitation by incorporating a decoder component and leverages the input structure with table specific embeddings and pre-training. 
TabT5 achieves new state-of-the-art results on several domains, including spreadsheet formula prediction with a 15% increase in sequence accuracy, QA with a 2.5% increase in sequence accuracy and data-to-text generation with a 2.5% increase in BLEU.", + "author": "Ewa Andrejczuk; Julian Eisenschlos; Francesco Piccinno; Syrine Krichene; Yasemin Altun", + "authorids": "/e/ewa-andrejczuk/; /j/julian-eisenschlos/; /f/francesco-piccinno/; /s/syrine-krichene/; /y/yasemin-altun/", + "bibtex": "@inproceedings{andrejczuk-etal-2022-table,\n title = \"Table-To-Text generation and pre-training with {T}ab{T}5\",\n author = \"Andrejczuk, Ewa and\n Eisenschlos, Julian and\n Piccinno, Francesco and\n Krichene, Syrine and\n Altun, Yasemin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.503/\",\n doi = \"10.18653/v1/2022.findings-emnlp.503\",\n pages = \"6758--6766\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.503.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.503/", + "pdf_size": 500634, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16680710706120065385&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Google Research, Z\u00fcrich; Google Research, Z\u00fcrich; Google Research, Z\u00fcrich; Google Research, Z\u00fcrich; Google Research, Z\u00fcrich", + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": 
"https://research.google", + "aff_unique_abbr": "Google", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Z\u00fcrich", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "Switzerland" + }, + { + "id": "2022.emnlp-industry.18", + "title": "Tackling Temporal Questions in Natural Language Interface to Databases", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Temporal aspect is one of the most challenging areas in Natural Language Interface to Databases (NLIDB). This paper addresses and examines how temporal questions being studied and supported by the research community at both levels: popular annotated dataset (e.g. Spider) and recent advanced models. We present a new dataset with accompanied databases supporting temporal questions in NLIDB. We experiment with two SOTA models (Picard and ValueNet) to investigate how our new dataset helps these models learn and improve performance in temporal aspect.", + "author": "Ngoc Phuoc An Vo; Octavian Popescu; Irene Manotas; Vadim Sheinin", + "authorids": "/n/ngoc-phuoc-an-vo/; /o/octavian-popescu/; /i/irene-manotas/; /v/vadim-sheinin/", + "bibtex": "@inproceedings{vo-etal-2022-tackling,\n title = \"Tackling Temporal Questions in Natural Language Interface to Databases\",\n author = \"Vo, Ngoc Phuoc An and\n Popescu, Octavian and\n Manotas, Irene and\n Sheinin, Vadim\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.18/\",\n doi = \"10.18653/v1/2022.emnlp-industry.18\",\n pages = \"179--187\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.18.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.18/", + "pdf_size": 230962, + "gs_citation": 
2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4472076360415318838&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.findings-emnlp.416", + "title": "Task Compass: Scaling Multi-task Pre-training with Task Prefix", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Leveraging task-aware annotated data as supervised signals to assist with self-supervised learning on large-scale unlabeled data has become a new trend in pre-training language models. Existing studies show that multi-task learning with large-scale supervised tasks suffers from negative effects across tasks. To tackle the challenge, we propose a task prefix guided multi-task pre-training framework to explore the relationships among tasks. We conduct extensive experiments on 40 datasets, which show that our model can not only serve as the strong foundation backbone for a wide range of tasks but also be feasible as a probing tool for analyzing task relationships. The task relationships reflected by the prefixes align transfer learning performance between tasks. They also suggest directions for data augmentation with complementary tasks, which help our model achieve human-parity results on commonsense reasoning leaderboards. 
Code is available at https://github.com/cooelf/CompassMTL.", + "author": "Zhuosheng Zhang; Shuohang Wang; Yichong Xu; Yuwei Fang; Wenhao Yu; Yang Liu; Hai Zhao; Chenguang Zhu; Michael Zeng", + "authorids": "/z/zhuosheng-zhang/; /s/shuohang-wang/; /y/yichong-xu/; /y/yuwei-fang/; /w/wenhao-yu/; /y/yang-liu/; /h/hai-zhao/; /c/chenguang-zhu/; /m/michael-zeng/", + "bibtex": "@inproceedings{zhang-etal-2022-task,\n title = \"Task Compass: Scaling Multi-task Pre-training with Task Prefix\",\n author = \"Zhang, Zhuosheng and\n Wang, Shuohang and\n Xu, Yichong and\n Fang, Yuwei and\n Yu, Wenhao and\n Liu, Yang and\n Zhao, Hai and\n Zhu, Chenguang and\n Zeng, Michael\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.416/\",\n doi = \"10.18653/v1/2022.findings-emnlp.416\",\n pages = \"5671--5685\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.416.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.416/", + "pdf_size": 1286492, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12289046333709080445&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Shanghai Jiao Tong University; Microsoft Cognitive Services Research; Microsoft Cognitive Services Research; Microsoft Cognitive Services Research; University of Notre Dame; Microsoft Cognitive Services Research; Shanghai Jiao Tong University; Microsoft Cognitive Services Research; Microsoft Cognitive Services Research", + "aff_domain": "sjtu.edu.cn;microsoft.com;microsoft.com;microsoft.com;nd.edu;microsoft.com;cs.sjtu.edu.cn;microsoft.com;microsoft.com", + "email": 
"sjtu.edu.cn;microsoft.com;microsoft.com;microsoft.com;nd.edu;microsoft.com;cs.sjtu.edu.cn;microsoft.com;microsoft.com", + "github": "https://github.com/cooelf/CompassMTL", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;2;1;0;1;1", + "aff_unique_norm": "Shanghai Jiao Tong University;Microsoft;University of Notre Dame", + "aff_unique_dep": ";Cognitive Services Research;", + "aff_unique_url": "https://www.sjtu.edu.cn;https://www.microsoft.com;https://www.nd.edu", + "aff_unique_abbr": "SJTU;Microsoft;Notre Dame", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;1;0;1;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.439", + "title": "Teaching Broad Reasoning Skills for Multi-Step QA by Generating Hard Contexts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Question-answering datasets require a broad set of reasoning skills. We show how to use question decompositions to teach language models these broad reasoning skills in a robust fashion. Specifically, we use widely available QDMR representations to programmatically create hard-to-cheat synthetic contexts for real questions in six multi-step reasoning datasets. These contexts are carefully designed to avoid common reasoning shortcuts prevalent in real contexts that prevent models from learning the right skills. This results in a pretraining dataset, named TeaBReaC, containing 525K multi-step questions (with associated formal programs) covering about 900 reasoning patterns. We show that pretraining standard language models (LMs) on TeaBReaC before fine-tuning them on target datasets improves their performance by up to 13 F1 points across 4 multi-step QA datasets, with up to 21 point gain on more complex questions. The resulting models also demonstrate higher robustness, with a 5-8 F1 point improvement on two contrast sets. 
Furthermore, TeaBReaC pretraining substantially improves model performance and robustness even when starting with numerate LMs pretrained using recent methods (e.g., PReasM, POET). Our work thus shows how to effectively use decomposition-guided contexts to robustly teach multi-step reasoning.", + "author": "Harsh Trivedi; Niranjan Balasubramanian; Tushar Khot; Ashish Sabharwal", + "authorids": "/h/harsh-trivedi/; /n/niranjan-balasubramanian/; /t/tushar-khot/; /a/ashish-sabharwal/", + "bibtex": "@inproceedings{trivedi-etal-2022-teaching,\n title = \"Teaching Broad Reasoning Skills for Multi-Step {QA} by Generating Hard Contexts\",\n author = \"Trivedi, Harsh and\n Balasubramanian, Niranjan and\n Khot, Tushar and\n Sabharwal, Ashish\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.439/\",\n doi = \"10.18653/v1/2022.emnlp-main.439\",\n pages = \"6541--6566\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.439.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.439/", + "pdf_size": 2002261, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15271288756240274889&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Stony Brook University\u2020\u2217; Allen Institute for AI\u2021; Allen Institute for AI\u2021; Allen Institute for AI\u2021", + "aff_domain": "cs.stonybrook.edu;cs.stonybrook.edu;allenai.org;allenai.org", + "email": "cs.stonybrook.edu;cs.stonybrook.edu;allenai.org;allenai.org", + "github": "https://github.com/stonybrooknlp/teabreac", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;1", + "aff_unique_norm": "Stony Brook University;Allen Institute for AI", + 
"aff_unique_dep": ";", + "aff_unique_url": "https://www.stonybrook.edu;https://allenai.org", + "aff_unique_abbr": "SBU;AI2", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.364", + "title": "TeleMelody: Lyric-to-Melody Generation with a Template-Based Two-Stage Method", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Lyric-to-melody generation is an important task in automatic songwriting. Previous lyric-to-melody generation systems usually adopt end-to-end models that directly generate melodies from lyrics, which suffer from several issues: 1) lack of paired lyric-melody training data; 2) lack of control on generated melodies. In this paper, we develop TeleMelody, a two-stage lyric-to-melody generation system with music template (e.g., tonality, chord progression, rhythm pattern, and cadence) to bridge the gap between lyrics and melodies (i.e., the system consists of a lyric-to-template module and a template-to-melody module). TeleMelody has two advantages. First, it is data efficient. The template-to-melody module is trained in a self-supervised way (i.e., the source template is extracted from the target melody) that does not need any lyric-melody paired data. The lyric-to-template module is made up of some rules and a lyric-to-rhythm model, which is trained with paired lyric-rhythm data that is easier to obtain than paired lyric-melody data. Second, it is controllable. The design of the template ensures that the generated melodies can be controlled by adjusting the musical elements in the template. 
Both subjective and objective experimental evaluations demonstrate that TeleMelody generates melodies with higher quality, better controllability, and less requirement on paired lyric-melody data than previous generation systems.", + "author": "Zeqian Ju; Peiling Lu; Xu Tan; Rui Wang; Chen Zhang; Songruoyao Wu; Kejun Zhang; Xiang-Yang Li; Tao Qin; Tie-Yan Liu", + "authorids": "/z/zeqian-ju/; /p/peiling-lu/; /x/xu-tan/; /r/rui-wang/; /c/chen-zhang/; /s/songruoyao-wu/; /k/kejun-zhang/; /x/xiang-yang-li/; /t/tao-qin/; /t/tie-yan-liu/", + "bibtex": "@inproceedings{ju-etal-2022-telemelody,\n title = \"{T}ele{M}elody: Lyric-to-Melody Generation with a Template-Based Two-Stage Method\",\n author = \"Ju, Zeqian and\n Lu, Peiling and\n Tan, Xu and\n Wang, Rui and\n Zhang, Chen and\n Wu, Songruoyao and\n Zhang, Kejun and\n Li, Xiang-Yang and\n Qin, Tao and\n Liu, Tie-Yan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.364/\",\n doi = \"10.18653/v1/2022.emnlp-main.364\",\n pages = \"5426--5437\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.364.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.364/", + "pdf_size": 6777376, + "gs_citation": 48, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14073546414629936212&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;;", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10 + }, + { + "id": "2022.emnlp-main.418", + "title": "TemporalWiki: A Lifelong Benchmark for Training and Evaluating Ever-Evolving Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language 
Models (LMs) become outdated as the world changes; they often fail to perform tasks requiring recent factual information which was absent or different during training, a phenomenon called temporal misalignment. This is especially a challenging problem because the research community still lacks a coherent dataset for assessing the adaptability of LMs to frequently-updated knowledge corpus such as Wikipedia. To this end, we introduce TemporalWiki, a lifelong benchmark for ever-evolving LMs that utilizes the difference between consecutive snapshots of English Wikipedia and English Wikidata for training and evaluation, respectively. The benchmark hence allows researchers to periodically track an LM\u2019s ability to retain previous knowledge and acquire updated/new knowledge at each point in time. We also find that training an LM on the diff data through continual learning methods achieves similar or better perplexity than on the entire snapshot in our benchmark with 12 times less computational cost, which verifies that factual knowledge in LMs can be safely updated with minimal training data via continual learning.", + "author": "Joel Jang; Seonghyeon Ye; Changho Lee; Sohee Yang; Joongbo Shin; Janghoon Han; Gyeonghun Kim; Minjoon Seo", + "authorids": "/j/joel-jang/; /s/seonghyeon-ye/; /c/changho-lee/; /s/sohee-yang/; /j/joongbo-shin/; /j/janghoon-han/; /g/gyeonghun-kim/; /m/minjoon-seo/", + "bibtex": "@inproceedings{jang-etal-2022-temporalwiki,\n title = \"{T}emporal{W}iki: A Lifelong Benchmark for Training and Evaluating Ever-Evolving Language Models\",\n author = \"Jang, Joel and\n Ye, Seonghyeon and\n Lee, Changho and\n Yang, Sohee and\n Shin, Joongbo and\n Han, Janghoon and\n Kim, Gyeonghun and\n Seo, Minjoon\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab 
Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.418/\",\n doi = \"10.18653/v1/2022.emnlp-main.418\",\n pages = \"6237--6250\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.418.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.418/", + "pdf_size": 2925645, + "gs_citation": 92, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6634957804601895229&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff": "KAIST; KAIST; Korea University; KAIST; LG AI Research; LG AI Research; LG AI Research; KAIST", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;korea.ac.kr;kaist.ac.kr;lgresearch.ai;lgresearch.ai;lgresearch.ai;kaist.ac.kr", + "email": "kaist.ac.kr;kaist.ac.kr;korea.ac.kr;kaist.ac.kr;lgresearch.ai;lgresearch.ai;lgresearch.ai;kaist.ac.kr", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;1;0;2;2;2;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Korea University;LG AI Research", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.korea.ac.kr;https://www.lgaires.com", + "aff_unique_abbr": "KAIST;KU;LG AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.114", + "title": "Text Editing as Imitation Game", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text editing, such as grammatical error correction, arises naturally from imperfect textual data. Recent works frame text editing as a multi-round sequence tagging task, where operations \u2013 such as insertion and substitution \u2013 are represented as a sequence of tags. While achieving good results, this encoding is limited in flexibility as all actions are bound to token-level tags. 
In this work, we reformulate text editing as an imitation game using behavioral cloning. Specifically, we convert conventional sequence-to-sequence data into state-to-action demonstrations, where the action space can be as flexible as needed. Instead of generating the actions one at a time, we introduce a dual decoders structure to parallel the decoding while retaining the dependencies between action tokens, coupled with trajectory augmentation to alleviate the distribution shift that imitation learning often suffers. In experiments on a suite of Arithmetic Equation benchmarks, our model consistently outperforms the autoregressive baselines in terms of performance, efficiency, and robustness. We hope our findings will shed light on future studies in reinforcement learning applying sequence-level action generation to natural language processing.", + "author": "Ning Shi; Bin Tang; Bo Yuan; Longtao Huang; Yewen Pu; Jie Fu; Zhouhan Lin", + "authorids": "/n/ning-shi/; /b/bin-tang/; /b/bo-yuan/; /l/longtao-huang/; /y/yewen-pu/; /j/jie-fu/; /z/zhouhan-lin/", + "bibtex": "@inproceedings{shi-etal-2022-text,\n title = \"Text Editing as Imitation Game\",\n author = \"Shi, Ning and\n Tang, Bin and\n Yuan, Bo and\n Huang, Longtao and\n Pu, Yewen and\n Fu, Jie and\n Lin, Zhouhan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.114/\",\n doi = \"10.18653/v1/2022.findings-emnlp.114\",\n pages = \"1583--1594\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.114.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.114/", + "pdf_size": 543837, + "gs_citation": 3, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=3021013458126668554&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Alberta Machine Intelligence Institute, Dept. of Computing Science, University of Alberta; Alibaba Group; Alibaba Group; Alibaba Group; Autodesk Research; Beijing Academy of Arti\ufb01cial Intelligence; Shanghai Jiao Tong University", + "aff_domain": "ualberta.ca;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;autodesk.com;baai.ac.cn;gmail.com", + "email": "ualberta.ca;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;autodesk.com;baai.ac.cn;gmail.com", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;1;2;3;4", + "aff_unique_norm": "University of Alberta;Alibaba Group;Autodesk;Beijing Academy of Artificial Intelligence;Shanghai Jiao Tong University", + "aff_unique_dep": "Dept. of Computing Science;;Autodesk Research;;", + "aff_unique_url": "https://www.ualberta.ca;https://www.alibaba.com;https://research.autodesk.com;https://www.baaic.cn;https://www.sjtu.edu.cn", + "aff_unique_abbr": "UAlberta;Alibaba;Autodesk;BAAI;SJTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;2;1;1", + "aff_country_unique": "Canada;China;United States" + }, + { + "id": "2022.emnlp-main.521", + "title": "Text Style Transferring via Adversarial Masking and Styled Filling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text style transfer is an important task in natural language processing with broad applications. Existing models following the masking and filling scheme suffer two challenges: the word masking procedure may mistakenly remove unexpected words and the selected words in the word filling procedure may lack diversity and semantic consistency. To tackle both challenges, in this study, we propose a style transfer model, with an adversarial masking approach and a styled filling technique (AMSF). 
Specifically, AMSF first trains a mask predictor by adversarial training without manual configuration. Then two additional losses, i.e. an entropy maximization loss and a consistency regularization loss, are introduced in training the word filling module to guarantee the diversity and semantic consistency of the transferred texts. Experimental results and analysis on two benchmark text style transfer data sets demonstrate the effectiveness of the proposed approaches.", + "author": "Jiarui Wang; Richong Zhang; Junfan Chen; Jaein Kim; Yongyi Mao", + "authorids": "/j/jiarui-wang/; /r/richong-zhang/; /j/junfan-chen/; /j/jaein-kim/; /y/yongyi-mao/", + "bibtex": "@inproceedings{wang-etal-2022-text,\n title = \"Text Style Transferring via Adversarial Masking and Styled Filling\",\n author = \"Wang, Jiarui and\n Zhang, Richong and\n Chen, Junfan and\n Kim, Jaein and\n Mao, Yongyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.521/\",\n doi = \"10.18653/v1/2022.emnlp-main.521\",\n pages = \"7654--7663\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.521.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.521/", + "pdf_size": 600516, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=72689374405449473&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China + Zhongguancun Laboratory, Beijing, China; SKLSDE, Beihang University, Beijing, China; SKLSDE, Beihang University, Beijing, China; School of Electrical Engineering and Computer Science, University of Ottawa, Canada", + "aff_domain": 
"act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "email": "act.buaa.edu.cn;act.buaa.edu.cn;act.buaa.edu.cn;buaa.edu.cn;uottawa.ca", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0+1;0;0;2", + "aff_unique_norm": "Beihang University;Zhongguancun Laboratory;University of Ottawa", + "aff_unique_dep": "SKLSDE;;School of Electrical Engineering and Computer Science", + "aff_unique_url": "http://www.buaa.edu.cn;;https://www.uottawa.ca", + "aff_unique_abbr": ";;U Ottawa", + "aff_campus_unique_index": "0;0;0;0;2", + "aff_campus_unique": "Beijing;;Ottawa", + "aff_country_unique_index": "0;0+0;0;0;1", + "aff_country_unique": "China;Canada" + }, + { + "id": "2022.findings-emnlp.299", + "title": "Text-Only Training for Image Captioning using Noise-Injected CLIP", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We consider the task of image-captioning using only the CLIP model and additional text data at training time and no additional captioned images. Our approach relies on the fact that CLIP is trained to make visual and textual embeddings similar. Therefore, we only need to learn how to translate CLIP textual embeddings back into text, and we can learn how to do this by learning a decoder for the frozen CLIP text encoder using only text. We argue that this intuition is \u201calmost correct\u201d because of a gap between the embedding spaces, and propose to rectify this via noise injection during training. We demonstrate the effectiveness of our approach by showing SOTA zero-shot image captioning across four benchmarks, including style transfer. 
Code, data, and models are available at https://github.com/DavidHuji/CapDec.", + "author": "David Nukrai; Ron Mokady; Amir Globerson", + "authorids": "/d/david-nukrai/; /r/ron-mokady/; /a/amir-globerson/", + "bibtex": "@inproceedings{nukrai-etal-2022-text,\n title = \"Text-Only Training for Image Captioning using Noise-Injected {CLIP}\",\n author = \"Nukrai, David and\n Mokady, Ron and\n Globerson, Amir\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.299/\",\n doi = \"10.18653/v1/2022.findings-emnlp.299\",\n pages = \"4055--4063\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.299.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.299/", + "pdf_size": 932048, + "gs_citation": 116, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11662111788243931168&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.emnlp-main.572", + "title": "TextFusion: Privacy-Preserving Pre-trained Model Inference via Token Fusion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recently, more and more pre-trained language models are released as a cloud service. It allows users who lack computing resources to perform inference with a powerful model by uploading data to the cloud. 
The plain text may contain private information, as the result, users prefer to do partial computations locally and upload intermediate representations to the cloud for subsequent inference.However, recent studies have shown that intermediate representations can also be recovered to plain text with reasonable accuracy, thus the risk of privacy leakage still exists. To address this issue, we propose TextFusion, a novel method for preserving inference privacy.Specifically, we train a Fusion Predictor to dynamically fuse token representations, which hides multiple private token representations behind an unrecognizable one.Furthermore, an adversarial training regime is employed to privatize these representations. In this way, the cloud only receives incomplete and perturbed representations, making it difficult to accurately recover the complete plain text.The experimental results on diverse classification tasks show that our approach can effectively preserve inference privacy without significantly sacrificing performance in different scenarios.", + "author": "Xin Zhou; Jinzhu Lu; Tao Gui; Ruotian Ma; Zichu Fei; Yuran Wang; Yong Ding; Yibo Cheung; Qi Zhang; Xuanjing Huang", + "authorids": "/x/xin-zhou/; /j/jinzhu-lu/; /t/tao-gui/; /r/ruotian-ma/; /z/zichu-fei/; /y/yuran-wang/; /y/yong-ding/; /y/yibo-cheung/; /q/qi-zhang/; /x/xuan-jing-huang/", + "bibtex": "@inproceedings{zhou-etal-2022-textfusion,\n title = \"{T}ext{F}usion: Privacy-Preserving Pre-trained Model Inference via Token Fusion\",\n author = \"Zhou, Xin and\n Lu, Jinzhu and\n Gui, Tao and\n Ma, Ruotian and\n Fei, Zichu and\n Wang, Yuran and\n Ding, Yong and\n Cheung, Yibo and\n Zhang, Qi and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for 
Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.572/\",\n doi = \"10.18653/v1/2022.emnlp-main.572\",\n pages = \"8360--8371\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.572.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.572/", + "pdf_size": 591720, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10366271937003569008&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "School of Computer Science, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; Institute of Modern Languages and Linguistics, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China; Honor Device Co., Ltd; Honor Device Co., Ltd; Honor Device Co., Ltd; School of Computer Science, Fudan University, Shanghai, China + International Human Phenome Institutes, Shanghai, China; School of Computer Science, Fudan University, Shanghai, China + International Human Phenome Institutes, Shanghai, China", + "aff_domain": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn; ; ; ; ; ;fudan.edu.cn;fudan.edu.cn", + "email": "fudan.edu.cn;m.fudan.edu.cn;fudan.edu.cn; ; ; ; ; ;fudan.edu.cn;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;1;1;1;0+2;0+2", + "aff_unique_norm": "Fudan University;Honor Device Co., Ltd;International Human Phenome Institutes", + "aff_unique_dep": "School of Computer Science;;", + "aff_unique_url": "https://www.fudan.edu.cn;;", + "aff_unique_abbr": "Fudan;;", + "aff_campus_unique_index": "0;0;0;0;0;0+0;0+0", + "aff_campus_unique": "Shanghai;", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.44", + "title": "TextHacker: Learning based Hybrid Local Search Algorithm for Text Hard-label Adversarial Attack", + "track": "main", + 
"status": "finding", + "award": false, + "abstract": "Existing textual adversarial attacks usually utilize the gradient or prediction confidence to generate adversarial examples, making it hard to be deployed in real-world applications. To this end, we consider a rarely investigated but more rigorous setting, namely hard-label attack, in which the attacker can only access the prediction label. In particular, we find we can learn the importance of different words via the change on prediction label caused by word substitutions on the adversarial examples. Based on this observation, we propose a novel adversarial attack, termed Text Hard-label attacker (TextHacker). TextHacker randomly perturbs lots of words to craft an adversarial example. Then, TextHacker adopts a hybrid local search algorithm with the estimation of word importance from the attack history to minimize the adversarial perturbation. Extensive evaluations for text classification and textual entailment show that TextHacker significantly outperforms existing hard-label attacks regarding the attack performance as well as adversary quality.", + "author": "Zhen Yu; Xiaosen Wang; Wanxiang Che; Kun He", + "authorids": "/z/zhen-yu/; /x/xiaosen-wang/; /w/wanxiang-che/; /k/kun-he/", + "bibtex": "@inproceedings{yu-etal-2022-texthacker,\n title = \"{T}ext{H}acker: Learning based Hybrid Local Search Algorithm for Text Hard-label Adversarial Attack\",\n author = \"Yu, Zhen and\n Wang, Xiaosen and\n Che, Wanxiang and\n He, Kun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.44/\",\n doi = \"10.18653/v1/2022.findings-emnlp.44\",\n pages = \"622--637\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.44.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.44/", + "pdf_size": 4773052, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17650821868670673875&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": "School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China + Huawei Singular Security Lab, Beijing, China; School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China + Huawei Singular Security Lab, Beijing, China; Research Center for SCIR, Harbin Institute of Technology, Harbin, China; School of Computer Science and Technology, Huazhong University of Science and Technology, Wuhan, China", + "aff_domain": "hust.edu.cn;hust.edu.cn;ir.hit.edu.cn;hust.edu.cn", + "email": "hust.edu.cn;hust.edu.cn;ir.hit.edu.cn;hust.edu.cn", + "github": "https://github.com/JHL-HUST/TextHacker", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;2;0", + "aff_unique_norm": "Huazhong University of Science and Technology;Huawei;Harbin Institute of Technology", + "aff_unique_dep": "School of Computer Science and Technology;Singular Security Lab;Research Center for SCIR", + "aff_unique_url": "http://www.hust.edu.cn;https://www.huawei.com;http://www.hit.edu.cn/", + "aff_unique_abbr": "HUST;Huawei;HIT", + "aff_campus_unique_index": "0+1;0+1;2;0", + "aff_campus_unique": "Wuhan;Beijing;Harbin", + "aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.769", + "title": "Textless Speech Emotion Conversion using Discrete & Decomposed Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Speech emotion conversion is the task of modifying the perceived emotion of a speech utterance while preserving the lexical content and speaker identity. 
In this study, we cast the problem of emotion conversion as a spoken language translation task. We use a decomposition of the speech signal into discrete learned representations, consisting of phonetic-content units, prosodic features, speaker, and emotion. First, we modify the speech content by translating the phonetic-content units to a target emotion, and then predict the prosodic features based on these units. Finally, the speech waveform is generated by feeding the predicted representations into a neural vocoder. Such a paradigm allows us to go beyond spectral and parametric changes of the signal, and model non-verbal vocalizations, such as laughter insertion, yawning removal, etc. We demonstrate objectively and subjectively that the proposed method is vastly superior to current approaches and even beats text-based systems in terms of perceived emotion and audio quality. We rigorously evaluate all components of such a complex system and conclude with an extensive model analysis and ablation study to better emphasize the architectural choices, strengths and weaknesses of the proposed method. 
Samples are available under the following link: https://speechbot.github.io/emotion", + "author": "Felix Kreuk; Adam Polyak; Jade Copet; Eugene Kharitonov; Tu Anh Nguyen; Morgan Rivi\u00e8re; Wei-Ning Hsu; Abdelrahman Mohamed; Emmanuel Dupoux; Yossi Adi", + "authorids": "/f/felix-kreuk/; /a/adam-polyak/; /j/jade-copet/; /e/eugene-kharitonov/; /t/tu-anh-nguyen/; /m/morgan-riviere/; /w/wei-ning-hsu/; /a/abdelrahman-mohamed/; /e/emmanuel-dupoux/; /y/yossi-adi/", + "bibtex": "@inproceedings{kreuk-etal-2022-textless,\n title = \"Textless Speech Emotion Conversion using Discrete {\\&} Decomposed Representations\",\n author = \"Kreuk, Felix and\n Polyak, Adam and\n Copet, Jade and\n Kharitonov, Eugene and\n Nguyen, Tu Anh and\n Rivi{\\`e}re, Morgan and\n Hsu, Wei-Ning and\n Mohamed, Abdelrahman and\n Dupoux, Emmanuel and\n Adi, Yossi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.769/\",\n doi = \"10.18653/v1/2022.emnlp-main.769\",\n pages = \"11200--11214\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.769.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.769/", + "pdf_size": 748712, + "gs_citation": 70, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11921676017858470023&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Bar-Ilan University, Ramat-Gan, Israel+Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research; Meta AI Research; EHESS, Paris+Meta AI Research; Meta AI Research", + "aff_domain": "fb.com; ; ; ; ; ; ; ;fb.com; ", + "email": "fb.com; ; ; ; ; ; ; ;fb.com; ", + "github": "", + "project": "[samples]", + "author_num": 
10, + "aff_unique_index": "0+1;1;1;1;1;1;1;1;2+1;1", + "aff_unique_norm": "Bar-Ilan University;Meta Platforms, Inc.;Ecole des Hautes Etudes en Sciences Sociales", + "aff_unique_dep": ";Meta AI Research;", + "aff_unique_url": "https://www.biu.ac.il;https://meta.com;https://www.ehess.fr", + "aff_unique_abbr": "BIU;Meta AI;EHESS", + "aff_campus_unique_index": "0;2", + "aff_campus_unique": "Ramat-Gan;;Paris", + "aff_country_unique_index": "0+1;1;1;1;1;1;1;1;2+1;1", + "aff_country_unique": "Israel;United States;France" + }, + { + "id": "2022.emnlp-main.770", + "title": "Textual Backdoor Attacks Can Be More Harmful via Two Simple Tricks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Backdoor attacks are a kind of emergent security threat in deep learning. After being injected with a backdoor, a deep neural model will behave normally on standard inputs but give adversary-specified predictions once the input contains specific backdoor triggers. In this paper, we find two simple tricks that can make existing textual backdoor attacks much more harmful. The first trick is to add an extra training task to distinguish poisoned and clean data during the training of the victim model, and the second one is to use all the clean training data rather than remove the original clean data corresponding to the poisoned data. These two tricks are universally applicable to different attack models. We conduct experiments in three tough situations including clean data fine-tuning, low-poisoning-rate, and label-consistent attacks. Experimental results show that the two tricks can significantly improve attack performance. This paper exhibits the great potential harmfulness of backdoor attacks. 
All the code and data can be obtained at https://github.com/thunlp/StyleAttack.", + "author": "Yangyi Chen; Fanchao Qi; Hongcheng Gao; Zhiyuan Liu; Maosong Sun", + "authorids": "/y/yangyi-chen/; /f/fanchao-qi/; /h/hongcheng-gao/; /z/zhiyuan-liu/; /m/maosong-sun/", + "bibtex": "@inproceedings{chen-etal-2022-textual,\n title = \"Textual Backdoor Attacks Can Be More Harmful via Two Simple Tricks\",\n author = \"Chen, Yangyi and\n Qi, Fanchao and\n Gao, Hongcheng and\n Liu, Zhiyuan and\n Sun, Maosong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.770/\",\n doi = \"10.18653/v1/2022.emnlp-main.770\",\n pages = \"11215--11221\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.770.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.770/", + "pdf_size": 241966, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18021214945168315302&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "NLP Group, DCST, IAI, BNRIST, Tsinghua University + University of Illinois Urbana-Champaign; NLP Group, DCST, IAI, BNRIST, Tsinghua University; NLP Group, DCST, IAI, BNRIST, Tsinghua University + Chongqing University; NLP Group, DCST, IAI, BNRIST, Tsinghua University + IICTUS, Shanghai + Jiangsu Collaborative Innovation Center for Language Ability, Jiangsu Normal University, Xuzhou; NLP Group, DCST, IAI, BNRIST, Tsinghua University + IICTUS, Shanghai + Jiangsu Collaborative Innovation Center for Language Ability, Jiangsu Normal University, Xuzhou", + "aff_domain": "illinois.edu;mails.tsinghua.edu.cn; ; ; ", + "email": "illinois.edu;mails.tsinghua.edu.cn; ; ; ", + "github": 
"https://github.com/thunlp/StyleAttack", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;0+2;0+3+4;0+3+4", + "aff_unique_norm": "Tsinghua University;University of Illinois at Urbana-Champaign;Chongqing University;IICTUS;Jiangsu Normal University", + "aff_unique_dep": "NLP Group;;;;Jiangsu Collaborative Innovation Center for Language Ability", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://illinois.edu;https://www.cqu.edu.cn;;http://www.jsnu.edu.cn", + "aff_unique_abbr": "THU;UIUC;CQU;;", + "aff_campus_unique_index": "1;;2+3;2+3", + "aff_campus_unique": ";Urbana-Champaign;Shanghai;Xuzhou", + "aff_country_unique_index": "0+1;0;0+0;0+0+0;0+0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.316", + "title": "Textual Enhanced Contrastive Learning for Solving Math Word Problems", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Solving math word problems is the task that analyses the relation of quantities e and requires an accurate understanding of contextual natural language information. Recent studies show that current models rely on shallow heuristics to predict solutions and could be easily misled by small textual perturbations. To address this problem, we propose a Textual Enhanced Contrastive Learning framework, which enforces the models to distinguish semantically similar examples while holding different mathematical logic. We adopt a self-supervised manner strategy to enrich examples with subtle textual variance by textual reordering or problem re-construction. We then retrieve the hardest to differentiate samples from both equation and textual perspectives and guide the model to learn their representations. 
Experimental results show that our method achieves state-of-the-art on both widely used benchmark datasets and also exquisitely designed challenge datasets in English and Chinese.", + "author": "Yibin Shen; Qianying Liu; Zhuoyuan Mao; Fei Cheng; Sadao Kurohashi", + "authorids": "/y/yibin-shen/; /q/qianying-liu/; /z/zhuoyuan-mao/; /f/fei-cheng/; /s/sadao-kurohashi/", + "bibtex": "@inproceedings{shen-etal-2022-textual,\n title = \"Textual Enhanced Contrastive Learning for Solving Math Word Problems\",\n author = \"Shen, Yibin and\n Liu, Qianying and\n Mao, Zhuoyuan and\n Cheng, Fei and\n Kurohashi, Sadao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.316/\",\n doi = \"10.18653/v1/2022.findings-emnlp.316\",\n pages = \"4297--4307\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.316.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.316/", + "pdf_size": 737059, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=554532578667325046&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 6, + "aff": "Meituan; Graduate School of Informatics, Kyoto University; Graduate School of Informatics, Kyoto University; Graduate School of Informatics, Kyoto University; Graduate School of Informatics, Kyoto University", + "aff_domain": "meituan.com;nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;i.kyoto-u.ac.jp", + "email": "meituan.com;nlp.ist.i.kyoto-u.ac.jp;nlp.ist.i.kyoto-u.ac.jp;i.kyoto-u.ac.jp;i.kyoto-u.ac.jp", + "github": "https://github.com/yiyunya/Textual_CL_MWP", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;1", + "aff_unique_norm": "Meituan;Kyoto University", + 
"aff_unique_dep": ";Graduate School of Informatics", + "aff_unique_url": "https://www.meituan.com;https://www.kyoto-u.ac.jp", + "aff_unique_abbr": "Meituan;Kyoto U", + "aff_campus_unique_index": "1;1;1;1", + "aff_campus_unique": ";Kyoto", + "aff_country_unique_index": "0;1;1;1;1", + "aff_country_unique": "China;Japan" + }, + { + "id": "2022.emnlp-main.443", + "title": "Textual Manifold-based Defense Against Natural Language Adversarial Examples", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Despite the recent success of large pretrained language models in NLP, they are susceptible to adversarial examples. Concurrently, several studies on adversarial images have observed an intriguing property: the adversarial images tend to leave the low-dimensional natural data manifold. In this study, we find a similar phenomenon occurs in the contextualized embedding space of natural sentences induced by pretrained language models in which textual adversarial examples tend to have their embeddings diverge off the manifold of natural sentence embeddings. Based on this finding, we propose Textual Manifold-based Defense (TMD), a defense mechanism that learns the embedding space manifold of the underlying language model and projects novel inputs back to the approximated structure before classification. Through extensive experiments, we find that our method consistently and significantly outperforms previous defenses under various attack settings while remaining unaffected to the clean accuracy. 
To the best of our knowledge, this is the first kind of manifold-based defense adapted to the NLP domain.", + "author": "Dang Nguyen Minh; Anh Tuan Luu", + "authorids": "/d/dang-nguyen-minh/; /l/luu-anh-tuan/", + "bibtex": "@inproceedings{nguyen-minh-luu-2022-textual,\n title = \"Textual Manifold-based Defense Against Natural Language Adversarial Examples\",\n author = \"Nguyen Minh, Dang and\n Luu, Anh Tuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.443/\",\n doi = \"10.18653/v1/2022.emnlp-main.443\",\n pages = \"6612--6625\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.443.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.443/", + "pdf_size": 424661, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7176080668652741579&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "VinAI Research, Vietnam; Nanyang Technological University, Singapore", + "aff_domain": "vinai.io;ntu.edu.sg", + "email": "vinai.io;ntu.edu.sg", + "github": "https://github.com/dangne/tmd", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "VinAI Research;Nanyang Technological University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.vin.ai;https://www.ntu.edu.sg", + "aff_unique_abbr": "VinAI;NTU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Vietnam;Singapore" + }, + { + "id": "2022.emnlp-main.238", + "title": "That\u2019s the Wrong Lung! 
Evaluating and Improving the Interpretability of Unsupervised Multimodal Encoders for Medical Data", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pretraining multimodal models on Electronic Health Records (EHRs) provides a means of learning representations that can transfer to downstream tasks with minimal supervision. Recent multimodal models induce soft local alignments between image regions and sentences. This is of particular interest in the medical domain, where alignments might highlight regions in an image relevant to specific phenomena described in free-text. While past work has suggested that attention \u201cheatmaps\u201d can be interpreted in this manner, there has been little evaluation of such alignments. We compare alignments from a state-of-the-art multimodal (image and text) model for EHR with human annotations that link image regions to sentences. Our main finding is that the text has an often weak or unintuitive influence on attention; alignments do not consistently reflect basic anatomical information. Moreover, synthetic modifications \u2014 such as substituting \u201cleft\u201d for \u201cright\u201d \u2014 do not substantially influence highlights. Simple techniques such as allowing the model to opt out of attending to the image and few-shot finetuning show promise in terms of their ability to improve alignments with very little or no supervision. We make our code and checkpoints open-source.", + "author": "Jered McInerney; Geoffrey Young; Jan-Willem van de Meent; Byron Wallace", + "authorids": "/j/jered-mcinerney/; /g/geoffrey-young/; /j/jan-willem-van-de-meent/; /b/byron-c-wallace/", + "bibtex": "@inproceedings{mcinerney-etal-2022-thats,\n title = \"That`s the Wrong Lung! 
Evaluating and Improving the Interpretability of Unsupervised Multimodal Encoders for Medical Data\",\n author = \"McInerney, Jered and\n Young, Geoffrey and\n van de Meent, Jan-Willem and\n Wallace, Byron\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.238/\",\n doi = \"10.18653/v1/2022.emnlp-main.238\",\n pages = \"3626--3648\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.238.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.238/", + "pdf_size": 2746427, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9811483804829430229&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Northeastern University; Brigham and Women\u2019s Hospital; University of Amsterdam; Northeastern University", + "aff_domain": "northeastern.edu;bwh.harvard.edu;uva.nl;northeastern.edu", + "email": "northeastern.edu;bwh.harvard.edu;uva.nl;northeastern.edu", + "github": "https://github.com/dmcinerney/gloria", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Northeastern University;Brigham and Women's Hospital;University of Amsterdam", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.northeastern.edu;https://www.brighamandwomens.org;https://www.uva.nl", + "aff_unique_abbr": "NEU;BWH;UvA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "United States;Netherlands" + }, + { + "id": "2022.emnlp-main.133", + "title": "The (Undesired) Attenuation of Human Biases by Multilinguality", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Some human preferences are 
universal. The odor of vanilla is perceived as pleasant all around the world. We expect neural models trained on human texts to exhibit these kind of preferences, i.e. biases, but we show that this is not always the case. We explore 16 static and contextual embedding models in 9 languages and, when possible, compare them under similar training conditions. We introduce and release CA-WEAT, multilingual cultural aware tests to quantify biases, and compare them to previous English-centric tests. Our experiments confirm that monolingual static embeddings do exhibit human biases, but values differ across languages, being far from universal. Biases are less evident in contextual models, to the point that the original human association might be reversed. Multilinguality proves to be another variable that attenuates and even reverses the effect of the bias, specially in contextual multilingual models. In order to explain this variance among models and languages, we examine the effect of asymmetries in the training corpus, departures from isomorphism in multilingual embedding spaces and discrepancies in the testing measures between languages.", + "author": "Cristina Espa\u00f1a-Bonet; Alberto Barr\u00f3n-Cede\u00f1o", + "authorids": "/c/cristina-espana-bonet/; /a/alberto-barron-cedeno/", + "bibtex": "@inproceedings{espana-bonet-barron-cedeno-2022-undesired,\n title = \"The (Undesired) Attenuation of Human Biases by Multilinguality\",\n author = \"Espa{\\~n}a-Bonet, Cristina and\n Barr{\\'o}n-Cede{\\~n}o, Alberto\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.133/\",\n doi = \"10.18653/v1/2022.emnlp-main.133\",\n pages = \"2056--2077\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.133.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.133/", + "pdf_size": 2164575, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15963644532933930036&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "DFKI GmbH, Saarland Informatics Campus, Saarbr\u00fccken, Germany; Universit\u00e0 di Bologna, Forl\u00ec, Italy", + "aff_domain": "dfki.de;unibo.it", + "email": "dfki.de;unibo.it", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "DFKI GmbH;Universit\u00e0 di Bologna", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.dfki.de;https://www.unibo.it", + "aff_unique_abbr": "DFKI;", + "aff_campus_unique_index": "0;1", + "aff_campus_unique": "Saarland Informatics Campus;Forl\u00ec", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Germany;Italy" + }, + { + "id": "2022.emnlp-main.648", + "title": "The Aligned Multimodal Movie Treebank: An audio, video, dependency-parse treebank", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Treebanks have traditionally included only text and were derived from written sources such as newspapers or the web. We introduce the Aligned Multimodal Movie Treebank (AMMT), an English language treebank derived from dialog in Hollywood movies which includes transcriptions of the audio-visual streams with word-level alignment, as well as part of speech tags and dependency parses in the Universal Dependencies formalism. AMMT consists of 31,264 sentences and 218,090 words, that will amount to the 3rd largest UD English treebank and the only multimodal treebank in UD. 
To help with the web-based annotation effort, we also introduce the Efficient Audio Alignment Annotator (EAAA), a companion tool that enables annotators to significantly speed-up their annotation processes.", + "author": "Adam Yaari; Jan DeWitt; Henry Hu; Bennett Stankovits; Sue Felshin; Yevgeni Berzak; Helena Aparicio; Boris Katz; Ignacio Cases; Andrei Barbu", + "authorids": "/a/adam-yaari/; /j/jan-dewitt/; /h/henry-hu/; /b/bennett-stankovits/; /s/sue-felshin/; /y/yevgeni-berzak/; /h/helena-aparicio/; /b/boris-katz/; /i/ignacio-cases/; /a/andrei-barbu/", + "bibtex": "@inproceedings{yaari-etal-2022-aligned,\n title = \"The Aligned Multimodal Movie Treebank: An audio, video, dependency-parse treebank\",\n author = \"Yaari, Adam and\n DeWitt, Jan and\n Hu, Henry and\n Stankovits, Bennett and\n Felshin, Sue and\n Berzak, Yevgeni and\n Aparicio, Helena and\n Katz, Boris and\n Cases, Ignacio and\n Barbu, Andrei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.648/\",\n doi = \"10.18653/v1/2022.emnlp-main.648\",\n pages = \"9531--9539\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.648.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.648/", + "pdf_size": 1648032, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9215153248626626852&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "MIT CSAIL-CBMM; MIT CSAIL-CBMM; MIT CSAIL-CBMM; MIT CSAIL-CBMM; MIT CSAIL-CBMM; Technion; Cornell University; MIT CSAIL-CBMM; MIT CSAIL-CBMM; MIT CSAIL-CBMM", + "aff_domain": "mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;technion.ac.il;cornell.edu;mit.edu;mit.edu;mit.edu", + "email": 
"mit.edu;mit.edu;mit.edu;mit.edu;mit.edu;technion.ac.il;cornell.edu;mit.edu;mit.edu;mit.edu", + "github": "https://github.com/abarbu/ammt", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;1;2;0;0;0", + "aff_unique_norm": "Massachusetts Institute of Technology;Technion - Israel Institute of Technology;Cornell University", + "aff_unique_dep": "Computer Science and Artificial Intelligence Laboratory - Center for Brains, Minds, and Machines;;", + "aff_unique_url": "https://www.mit.edu;https://www.technion.ac.il/en/;https://www.cornell.edu", + "aff_unique_abbr": "MIT;Technion;Cornell", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;0;0;0;0", + "aff_country_unique": "United States;Israel" + }, + { + "id": "2022.emnlp-main.788", + "title": "The Architectural Bottleneck Principle", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we seek to measure how much information a component in a neural network could extract from the representations fed into it. Our work stands in contrast to prior probing work, most of which investigates how much information a model's representations contain. This shift in perspective leads us to propose a new principle for probing, the architectural bottleneck principle: In order to estimate how much information a given component could extract, a probe should look exactly like the component. Relying on this principle, we estimate how much syntactic information is available to transformers through our attentional probe, a probe that exactly resembles a transformer's self-attention head. Experimentally, we find that, in three models (BERT, ALBERT, and RoBERTa), a sentence's syntax tree is mostly extractable by our probe, suggesting these models have access to syntactic information while composing their contextual representations. 
Whether this information is actually used by these models, however, remains an open question.", + "author": "Tiago Pimentel; Josef Valvoda; Niklas Stoehr; Ryan Cotterell", + "authorids": "/t/tiago-pimentel/; /j/josef-valvoda/; /n/niklas-stoehr/; /r/ryan-cotterell/", + "bibtex": "@inproceedings{pimentel-etal-2022-attentional,\n title = \"The Architectural Bottleneck Principle\",\n author = \"Pimentel, Tiago and\n Valvoda, Josef and\n Stoehr, Niklas and\n Cotterell, Ryan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.788/\",\n doi = \"10.18653/v1/2022.emnlp-main.788\",\n pages = \"11459--11472\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.788.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.788/", + "pdf_size": 647550, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5688400164295691458&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "University of Cambridge; University of Cambridge; ETH Z\u00fcrich; ETH Z\u00fcrich", + "aff_domain": "cam.ac.uk;cam.ac.uk;inf.ethz.ch;inf.ethz.ch", + "email": "cam.ac.uk;cam.ac.uk;inf.ethz.ch;inf.ethz.ch", + "github": "https://github.com/rycolab/attentional-probe", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "University of Cambridge;ETH Z\u00fcrich", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.cam.ac.uk;https://www.ethz.ch", + "aff_unique_abbr": "Cambridge;ETHZ", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Cambridge;", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "United Kingdom;Switzerland" + }, + { + "id": "2022.emnlp-main.406", + "title": "The 
Authenticity Gap in Human Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Human ratings are the gold standard in NLG evaluation. The standard protocol is to collect ratings of generated text, average across annotators, and rank NLG systems by their average scores. However, little consideration has been given as to whether this approach faithfully captures human preferences. Analyzing this standard protocol through the lens of utility theory in economics, we identify the implicit assumptions it makes about annotators. These assumptions are often violated in practice, in which case annotator ratings cease to reflect their preferences. The most egregious violations come from using Likert scales, which provably reverse the direction of the true preference in certain cases. We suggest improvements to the standard protocol to make it more theoretically sound, but even in its improved form, it cannot be used to evaluate open-ended tasks like story generation. For the latter, we propose a new human evaluation protocol called system-level probabilistic assessment (SPA). When human evaluation of stories is done with SPA, we can recover the ordering of GPT-3 models by size, with statistically significant results. 
However, when human evaluation is done with the standard protocol, less than half of the expected preferences can be recovered (e.g., there is no significant difference between curie and davinci, despite using a highly powered test).", + "author": "Kawin Ethayarajh; Dan Jurafsky", + "authorids": "/k/kawin-ethayarajh/; /d/dan-jurafsky/", + "bibtex": "@inproceedings{ethayarajh-jurafsky-2022-authenticity,\n title = \"The Authenticity Gap in Human Evaluation\",\n author = \"Ethayarajh, Kawin and\n Jurafsky, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.406/\",\n doi = \"10.18653/v1/2022.emnlp-main.406\",\n pages = \"6056--6070\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.406.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.406/", + "pdf_size": 1098804, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=125893898484335749&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Stanford University; Stanford University", + "aff_domain": "stanford.edu;stanford.edu", + "email": "stanford.edu;stanford.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.326", + "title": "The Curious Case of Absolute Position Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Transformer language 
models encode the notion of word order using positional information. Most commonly, this positional information is represented by absolute position embeddings (APEs), that are learned from the pretraining data. However, in natural language, it is not absolute position that matters, but relative position, and the extent to which APEs can capture this type of information has not been studied. In this work, we observe that models trained with APE over-rely on positional information to the point that they break-down when subjected to sentences with shifted position information. Specifically, when models are subjected to sentences starting from a non-zero position (excluding the effect of priming), they exhibit noticeably degraded performance on zero- to full-shot tasks, across a range of model families and model sizes. Our findings raise questions about the efficacy of APEs to model the relativity of position information, and invite further introspection on the sentence and word order processing strategies employed by these models.", + "author": "Koustuv Sinha; Amirhossein Kazemnejad; Siva Reddy; Joelle Pineau; Dieuwke Hupkes; Adina Williams", + "authorids": "/k/koustuv-sinha/; /a/amirhossein-kazemnejad/; /s/siva-reddy/; /j/joelle-pineau/; /d/dieuwke-hupkes/; /a/adina-williams/", + "bibtex": "@inproceedings{sinha-etal-2022-curious,\n title = \"The Curious Case of Absolute Position Embeddings\",\n author = \"Sinha, Koustuv and\n Kazemnejad, Amirhossein and\n Reddy, Siva and\n Pineau, Joelle and\n Hupkes, Dieuwke and\n Williams, Adina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.326/\",\n doi = \"10.18653/v1/2022.findings-emnlp.326\",\n pages = 
\"4449--4472\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.326.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.326/", + "pdf_size": 836773, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11493824268094432282&as_sdt=8000005&sciodt=0,19&hl=en", + "gs_version_total": 3, + "aff": "McGill University / Mila - Quebec AI+Meta AI; McGill University / Mila - Quebec AI+Meta AI; McGill University / Mila - Quebec AI; McGill University / Mila - Quebec AI+Meta AI; Meta AI; Meta AI", + "aff_domain": "mail.mcgill.ca;mail.mcgill.ca; ; ; ; ", + "email": "mail.mcgill.ca;mail.mcgill.ca; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;0;0+1;1;1", + "aff_unique_norm": "McGill University;Meta Platforms, Inc.", + "aff_unique_dep": "Mila - Quebec AI;Meta AI", + "aff_unique_url": "https://www.mcgill.ca;https://meta.com", + "aff_unique_abbr": "McGill;Meta", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;0+1;0;0+1;1;1", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.emnlp-main.760", + "title": "The Curious Case of Control", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Children acquiring English make systematic errors on subject control sentences even after they have reached near-adult competence (Chomsky, 1969), possibly due to heuristics based on semantic roles (Maratsos, 1974).Given the advanced fluency of large generative language models, we ask whether model outputs are consistent with these heuristics, and to what degree different models are consistent with each other. We find that models can be categorized by behavior into three separate groups, with broad differences between the groups. The outputs of models in the largest group are consistent with positional heuristics that succeed on subject control but fail on object control. 
This result is surprising, given that object control is orders of magnitude more frequent in the text data used to train such models. We examine to what degree the models are sensitive to prompting with agent-patient information, finding that raising the salience of agent and patient relations results in significant changes in the outputs of most models. Based on this observation, we leverage an existing dataset of semantic proto-role annotations (White et al. 2020) to explore the connections between control and labeling event participants with properties typically associated with agents and patients.", + "author": "Elias Stengel-Eskin; Benjamin Van Durme", + "authorids": "/e/elias-stengel-eskin/; /b/benjamin-van-durme/", + "bibtex": "@inproceedings{stengel-eskin-van-durme-2022-curious,\n title = \"The Curious Case of Control\",\n author = \"Stengel-Eskin, Elias and\n Van Durme, Benjamin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.760/\",\n doi = \"10.18653/v1/2022.emnlp-main.760\",\n pages = \"11065--11076\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.760.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.760/", + "pdf_size": 346424, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13101280124770838814&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Johns Hopkins University; Johns Hopkins University", + "aff_domain": "jhu.edu;jhu.edu", + "email": "jhu.edu;jhu.edu", + "github": "https://github.com/esteng/curious-case-of-control", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + 
"aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.473", + "title": "The Devil in Linear Transformer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Linear transformers aim to reduce the quadratic space-time complexity of vanilla transformers. However, they usually suffer from degraded performances on various tasks and corpus. In this paper, we examine existing kernel-based linear transformers and identify two key issues that lead to such performance gaps: 1) unbounded gradients in the attention computation adversely impact the convergence of linear transformer models; 2) attention dilution which trivially distributes attention scores over long sequences while neglecting neighbouring structures. To address these issues, we first identify that the scaling of attention matrices is the devil in unbounded gradients, which turns out unnecessary in linear attention as we show theoretically and empirically. To this end, we propose a new linear attention that replaces the scaling operation with a normalization to stabilize gradients. For the issue of attention dilution, we leverage a diagonal attention to confine attention to only neighbouring tokens in early layers. Benefiting from the stable gradients and improved attention, our new linear transformer model, transNormer, demonstrates superior performance on text classification and language modeling tasks, as well as on the challenging Long-Range Arena benchmark, surpassing vanilla transformer and existing linear variants by a clear margin while being significantly more space-time efficient. 
The code is available at https://github.com/OpenNLPLab/Transnormer .", + "author": "Zhen Qin; Xiaodong Han; Weixuan Sun; Dongxu Li; Lingpeng Kong; Nick Barnes; Yiran Zhong", + "authorids": "/z/zhen-qin/; /x/xiaodong-han/; /w/weixuan-sun/; /d/dongxu-li/; /l/lingpeng-kong/; /n/nick-barnes/; /y/yiran-zhong/", + "bibtex": "@inproceedings{qin-etal-2022-devil,\n title = \"The Devil in Linear Transformer\",\n author = \"Qin, Zhen and\n Han, Xiaodong and\n Sun, Weixuan and\n Li, Dongxu and\n Kong, Lingpeng and\n Barnes, Nick and\n Zhong, Yiran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.473/\",\n doi = \"10.18653/v1/2022.emnlp-main.473\",\n pages = \"7025--7041\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.473.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.473/", + "pdf_size": 1013853, + "gs_citation": 79, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11998716820864044481&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "SenseTime Research; SenseTime Research; Australian National University+OPPO Research Institute; Australian National University; Shanghai AI Laboratory+The University of Hong Kong; Australian National University; Shanghai AI Laboratory", + "aff_domain": "gmail.com; ; ; ; ; ;gmail.com", + "email": "gmail.com; ; ; ; ; ;gmail.com", + "github": "https://github.com/OpenNLPLab/Transnormer", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1+2;1;3+4;1;3", + "aff_unique_norm": "SenseTime;Australian National University;OPPO Research Institute;Shanghai AI Laboratory;The University of Hong Kong", + "aff_unique_dep": "SenseTime Research;;;;", + "aff_unique_url": 
"https://www.sensetime.com;https://www.anu.edu.au;https://www.oppo.com/en;https://www.shanghai-ai-lab.com;https://www.hku.hk", + "aff_unique_abbr": "SenseTime;ANU;OPPO RI;SAIL;HKU", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1+0;1;0+0;1;0", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.findings-emnlp.304", + "title": "The Effects of Corpus Choice and Morphosyntax on Multilingual Space Induction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In an effort to study the inductive biases of language models, numerous studies have attempted to use linguistically motivated tasks as a proxy of sorts, wherein performance on these tasks would imply an inductive bias towards a specific linguistic phenomenon. In this study, we attempt to analyse the inductive biases of language models with respect to natural language phenomena, in the context of building multilingual embedding spaces.We sample corpora from 2 sources in 15 languages and train language models on pseudo-bilingual variants of each corpus, created by duplicating each corpus and shifting token indices for half the resulting corpus. We evaluate the cross-lingual capabilities of these LMs, and show that while correlations with language families tend to be weak, other corpus-level characteristics, such as type-token ratio, tend to be more strongly correlated. 
Finally, we show that multilingual spaces can be built, albeit less effectively, even when additional destructive perturbations are applied to the training corpora, implying that (effectively) bag-of-words models also have an inductive bias that is sufficient for inducing multilingual spaces.", + "author": "Vinit Ravishankar; Joakim Nivre", + "authorids": "/v/vinit-ravishankar/; /j/joakim-nivre/", + "bibtex": "@inproceedings{ravishankar-nivre-2022-effects,\n title = \"The Effects of Corpus Choice and Morphosyntax on Multilingual Space Induction\",\n author = \"Ravishankar, Vinit and\n Nivre, Joakim\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.304/\",\n doi = \"10.18653/v1/2022.findings-emnlp.304\",\n pages = \"4130--4139\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.304.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.304/", + "pdf_size": 538938, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=881990960102671464&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Department of Informatics, University of Oslo + RISE Research Institutes of Sweden; Dept. of Linguistics and Philology, Uppsala University", + "aff_domain": "ifi.uio.no;ri.se", + "email": "ifi.uio.no;ri.se", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;2", + "aff_unique_norm": "University of Oslo;RISE Research Institutes of Sweden;Uppsala University", + "aff_unique_dep": "Department of Informatics;;Dept. 
of Linguistics and Philology", + "aff_unique_url": "https://www.uio.no;https://www.rise.se;https://www.uu.se", + "aff_unique_abbr": "UiO;RISE;UU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1", + "aff_country_unique": "Norway;Sweden" + }, + { + "id": "2022.emnlp-main.9", + "title": "The Geometry of Multilingual Language Model Representations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We assess how multilingual language models maintain a shared multilingual representation space while still encoding language-sensitive information in each language. Using XLM-R as a case study, we show that languages occupy similar linear subspaces after mean-centering, evaluated based on causal effects on language modeling performance and direct comparisons between subspaces for 88 languages. The subspace means differ along language-sensitive axes that are relatively stable throughout middle layers, and these axes encode information such as token vocabularies. Shifting representations by language means is sufficient to induce token predictions in different languages. However, we also identify stable language-neutral axes that encode information such as token positions and part-of-speech. We visualize representations projected onto language-sensitive and language-neutral axes, identifying language family and part-of-speech clusters, along with spirals, toruses, and curves representing token position information. 
These results demonstrate that multilingual language models encode information along orthogonal language-sensitive and language-neutral axes, allowing the models to extract a variety of features for downstream tasks and cross-lingual transfer learning.", + "author": "Tyler Chang; Zhuowen Tu; Benjamin Bergen", + "authorids": "/t/tyler-chang/; /z/zhuowen-tu/; /b/benjamin-bergen/", + "bibtex": "@inproceedings{chang-etal-2022-geometry,\n title = \"The Geometry of Multilingual Language Model Representations\",\n author = \"Chang, Tyler and\n Tu, Zhuowen and\n Bergen, Benjamin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.9/\",\n doi = \"10.18653/v1/2022.emnlp-main.9\",\n pages = \"119--136\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.9.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.9/", + "pdf_size": 7773833, + "gs_citation": 59, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8474184066579861328&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Cognitive Science, University of California San Diego + Hal\u0131c\u0131o\u011flu Data Science Institute, University of California San Diego; Department of Cognitive Science, University of California San Diego; Department of Cognitive Science, University of California San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+0;0;0", + "aff_unique_norm": "University of California San Diego", + "aff_unique_dep": "Department of Cognitive Science", + "aff_unique_url": "https://ucsd.edu", + "aff_unique_abbr": "UCSD", + 
"aff_campus_unique_index": "0+0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.13", + "title": "The Importance of Being Parameters: An Intra-Distillation Method for Serious Gains", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent model pruning methods have demonstrated the ability to remove redundant parameters without sacrificing model performance. Common methods remove redundant parameters according to the parameter sensitivity, a gradient-based measure reflecting the contribution of the parameters. In this paper, however, we argue that redundant parameters can be trained to make beneficial contributions. We first highlight the large sensitivity (contribution) gap among high-sensitivity and low-sensitivity parameters and show that the model generalization performance can be significantly improved after balancing the contribution of all parameters. Our goal is to balance the sensitivity of all parameters and encourage all of them to contribute equally. We propose a general task-agnostic method, namely intra-distillation, appended to the regular training loss to balance parameter sensitivity. Moreover, we also design a novel adaptive learning method to control the strength of intra-distillation loss for faster convergence. 
Our experiments show the strong effectiveness of our methods on machine translation, natural language understanding, and zero-shot cross-lingual transfer across up to 48 languages, e.g., a gain of 3.54 BLEU on average across 8 language pairs from the IWSLT\u201914 dataset.", + "author": "Haoran Xu; Philipp Koehn; Kenton Murray", + "authorids": "/h/haoran-xu/; /p/philipp-koehn/; /k/kenton-murray/", + "bibtex": "@inproceedings{xu-etal-2022-importance,\n title = \"The Importance of Being Parameters: An Intra-Distillation Method for Serious Gains\",\n author = \"Xu, Haoran and\n Koehn, Philipp and\n Murray, Kenton\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.13/\",\n doi = \"10.18653/v1/2022.emnlp-main.13\",\n pages = \"170--183\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.13.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.13/", + "pdf_size": 983050, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4218890614037550471&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 3, + "aff": "Johns Hopkins University; Johns Hopkins University; Johns Hopkins University", + "aff_domain": "jhu.edu;jhu.edu;jhu.edu", + "email": "jhu.edu;jhu.edu;jhu.edu", + "github": "https://github.com/fe1ixxu/Intra-Distillation", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Johns Hopkins University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.jhu.edu", + "aff_unique_abbr": "JHU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.279", + 
"title": "The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we consider the problem of sparsifying BERT models, which are a key building block for natural language processing, in order to reduce their storage and computational cost. We introduce the Optimal BERT Surgeon (oBERT), an efficient and accurate pruning method based on approximate second-order information, which we show to yield state-of-the-art results in both stages of language tasks: pre-training and fine-tuning. Specifically, oBERT extends existing work on second-order pruning by allowing for pruning weight blocks, and is the first such method that is applicable at BERT scale. Second, we investigate compounding compression approaches to obtain highly compressed but accurate models for deployment on edge devices. These models significantly push boundaries of the current state-of-the-art sparse BERT models with respect to all metrics: model size, inference speed and task accuracy. For example, relative to the dense BERT-base, we obtain 10x model size compression with < 1% accuracy drop, 10x CPU-inference speedup with < 2% accuracy drop, and 29x CPU-inference speedup with < 7.5% accuracy drop. 
Our code, fully integrated with Transformers and SparseML, is available at https://github.com/neuralmagic/sparseml/tree/main/research/optimal_BERT_surgeon_oBERT.", + "author": "Eldar Kurtic; Daniel Campos; Tuan Nguyen; Elias Frantar; Mark Kurtz; Benjamin Fineran; Michael Goin; Dan Alistarh", + "authorids": "/e/eldar-kurtic/; /d/daniel-campos/; /t/tuan-nguyen/; /e/elias-frantar/; /m/mark-kurtz/; /b/benjamin-fineran/; /m/michael-goin/; /d/dan-alistarh/", + "bibtex": "@inproceedings{kurtic-etal-2022-optimal,\n title = \"The Optimal {BERT} Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models\",\n author = \"Kurtic, Eldar and\n Campos, Daniel and\n Nguyen, Tuan and\n Frantar, Elias and\n Kurtz, Mark and\n Fineran, Benjamin and\n Goin, Michael and\n Alistarh, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.279/\",\n doi = \"10.18653/v1/2022.emnlp-main.279\",\n pages = \"4163--4181\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.279.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.279/", + "pdf_size": 522563, + "gs_citation": 146, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16031482023959843891&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Institute of Science and Technology Austria; Neural Magic Inc. 
+ Department of Computer Science, University of Illinois Urbana-Champaign; Neural Magic Inc.; Institute of Science and Technology Austria; Neural Magic Inc.; Neural Magic Inc.; Neural Magic Inc.; Institute of Science and Technology Austria + Neural Magic Inc.", + "aff_domain": "ist.ac.at; ; ; ; ; ; ;ist.ac.at", + "email": "ist.ac.at; ; ; ; ; ; ;ist.ac.at", + "github": "https://github.com/neuralmagic/sparseml/tree/main/research/optimal_BERT_surgeon_oBERT", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1+2;1;0;1;1;1;0+1", + "aff_unique_norm": "Institute of Science and Technology Austria;Neural Magic Inc.;University of Illinois Urbana-Champaign", + "aff_unique_dep": ";;Department of Computer Science", + "aff_unique_url": "https://www.ist.ac.at;https://www.neuralmagic.com;https://illinois.edu", + "aff_unique_abbr": "IST Austria;Neural Magic;UIUC", + "aff_campus_unique_index": "1;", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;1+1;1;0;1;1;1;0+1", + "aff_country_unique": "Austria;United States" + }, + { + "id": "2022.findings-emnlp.373", + "title": "The Undesirable Dependence on Frequency of Gender Bias Metrics Based on Word Embeddings", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Numerous works use word embedding-based metrics to quantify societal biases and stereotypes in texts. Recent studies have found that word embeddings can capture semantic similarity but may be affected by word frequency. In this work we study the effect of frequency when measuring female vs. male gender bias with word embedding-based bias quantification methods. We find that Skip-gram with negative sampling and GloVe tend to detect male bias in high frequency words, while GloVe tends to return female bias in low frequency words. We show these behaviors still exist when words are randomly shuffled. 
This proves that the frequency-based effect observed in unshuffled corpora stems from properties of the metric rather than from word associations. The effect is spurious and problematic since bias metrics should depend exclusively on word co-occurrences and not individual word frequencies. Finally, we compare these results with the ones obtained with an alternative metric based on Pointwise Mutual Information. We find that this metric does not show a clear dependence on frequency, even though it is slightly skewed towards male bias across all frequencies.", + "author": "Francisco Valentini; Germ\u00e1n Rosati; Diego Fernandez Slezak; Edgar Altszyler", + "authorids": "/f/francisco-valentini/; /g/german-rosati/; /d/diego-fernandez-slezak/; /e/edgar-altszyler/", + "bibtex": "@inproceedings{valentini-etal-2022-undesirable,\n title = \"The Undesirable Dependence on Frequency of Gender Bias Metrics Based on Word Embeddings\",\n author = \"Valentini, Francisco and\n Rosati, Germ{\\'a}n and\n Fernandez Slezak, Diego and\n Altszyler, Edgar\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.373/\",\n doi = \"10.18653/v1/2022.findings-emnlp.373\",\n pages = \"5086--5092\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.373.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.373/", + "pdf_size": 1034152, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7368268633436425447&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "ICC (UBA-CONICET) + Maestr\u00eda en Data Mining (UBA); Escuela IDAES (UNSAM); ICC (UBA-CONICET); ICC (UBA-CONICET) + Maestr\u00eda en Data Mining (UBA)", + "aff_domain": 
"gmail.com;unsam.edu.ar;dc.uba.ar;dc.uba.ar", + "email": "gmail.com;unsam.edu.ar;dc.uba.ar;dc.uba.ar", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2;0;0+1", + "aff_unique_norm": "Instituto de Ciencias de la Computaci\u00f3n;Universidad de Buenos Aires;Escuela IDAES", + "aff_unique_dep": ";Maestr\u00eda en Data Mining;", + "aff_unique_url": "http://www.icc.uba.ar/;https://www.uba.ar;https://www.idaes.org.ar", + "aff_unique_abbr": "ICC;UBA;IDAES", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "Argentina" + }, + { + "id": "2022.emnlp-main.746", + "title": "The better your Syntax, the better your Semantics? Probing Pretrained Language Models for the English Comparative Correlative", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Construction Grammar (CxG) is a paradigm from cognitive linguistics emphasising the connection between syntax and semantics. Rather than rules that operate on lexical items, it posits constructions as the central building blocks of language, i.e., linguistic units of different granularity that combine syntax and semantics. As a first step towards assessing the compatibility of CxG with the syntactic and semantic knowledge demonstrated by state-of-the-art pretrained language models (PLMs), we present an investigation of their capability to classify and understand one of the most commonly studied constructions, the English comparative correlative (CC). We conduct experiments examining the classification accuracy of a syntactic probe on the one hand and the models\u2019 behaviour in a semantic application task on the other, with BERT, RoBERTa, and DeBERTa as the example PLMs. Our results show that all three investigated PLMs are able to recognise the structure of the CC but fail to use its meaning. 
While human-like performance of PLMs on many NLP tasks has been alleged, this indicates that PLMs still suffer from substantial shortcomings in central domains of linguistic knowledge.", + "author": "Leonie Weissweiler; Valentin Hofmann; Abdullatif K\u00f6ksal; Hinrich Sch\u00fctze", + "authorids": "/l/leonie-weissweiler/; /v/valentin-hofmann/; /a/abdullatif-koksal/; /h/hinrich-schutze/", + "bibtex": "@inproceedings{weissweiler-etal-2022-better,\n title = \"The better your Syntax, the better your Semantics? Probing Pretrained Language Models for the {E}nglish Comparative Correlative\",\n author = {Weissweiler, Leonie and\n Hofmann, Valentin and\n K{\\\"o}ksal, Abdullatif and\n Sch{\\\"u}tze, Hinrich},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.746/\",\n doi = \"10.18653/v1/2022.emnlp-main.746\",\n pages = \"10859--10882\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.746.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.746/", + "pdf_size": 637382, + "gs_citation": 43, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17431373921645262877&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "Center for Information and Language Processing, LMU Munich + Munich Center of Machine Learning; Faculty of Linguistics, University of Oxford + Center for Information and Language Processing, LMU Munich + Munich Center of Machine Learning; Center for Information and Language Processing, LMU Munich + Munich Center of Machine Learning; Center for Information and Language Processing, LMU Munich + Munich Center of Machine Learning", + "aff_domain": "cis.lmu.de;ling-phil.ox.ac.uk;cis.lmu.de; ", + "email": 
"cis.lmu.de;ling-phil.ox.ac.uk;cis.lmu.de; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+0+1;0+1;0+1", + "aff_unique_norm": "LMU Munich;Munich Center of Machine Learning;University of Oxford", + "aff_unique_dep": "Center for Information and Language Processing;Center of Machine Learning;Faculty of Linguistics", + "aff_unique_url": "https://www.lmu.de;https://www.munich-center-for-machine-learning.de/;https://www.ox.ac.uk", + "aff_unique_abbr": "LMU;;Oxford", + "aff_campus_unique_index": "0;2+0;0;0", + "aff_campus_unique": "Munich;;Oxford", + "aff_country_unique_index": "0+0;1+0+0;0+0;0+0", + "aff_country_unique": "Germany;United Kingdom" + }, + { + "id": "2022.findings-emnlp.195", + "title": "The challenges of temporal alignment on Twitter during crises", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language use changes over time, and this impacts the effectiveness of NLP systems. This phenomenon is even more prevalent in social media data during crisis events where meaning and frequency of word usage may change over the course of days. Contextual language models fail to adapt temporally, emphasizing the need for temporal adaptation in models which need to be deployed over an extended period of time. While existing approaches consider data spanning large periods of time (from years to decades), shorter time spans are critical for crisis data. We quantify temporal degradation for this scenario and propose methods to cope with performance loss by leveraging techniques from domain adaptation. To the best of our knowledge, this is the first effort to explore effects of rapid language change driven by adversarial adaptations, particularly during natural and human-induced disasters. 
Through extensive experimentation on diverse crisis datasets, we analyze under what conditions our approaches outperform strong baselines while highlighting the current limitations of temporal adaptation methods in scenarios where access to unlabeled data is scarce.", + "author": "Aniket Pramanick; Tilman Beck; Kevin Stowe; Iryna Gurevych", + "authorids": "/a/aniket-pramanick/; /t/tilman-beck/; /k/kevin-stowe/; /i/iryna-gurevych/", + "bibtex": "@inproceedings{pramanick-etal-2022-challenges,\n title = \"The challenges of temporal alignment on {T}witter during crises\",\n author = \"Pramanick, Aniket and\n Beck, Tilman and\n Stowe, Kevin and\n Gurevych, Iryna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.195/\",\n doi = \"10.18653/v1/2022.findings-emnlp.195\",\n pages = \"2658--2672\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.195.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.195/", + "pdf_size": 379237, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3450906516301258767&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI 
(hessian.AI) + Technical University Darmstadt", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/UKPLab/emnlp2022-temporal-adaptation", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1+0;0+1+0;0+1+0;0+1+0", + "aff_unique_norm": "Technical University of Darmstadt;Hessian Center for AI", + "aff_unique_dep": "Ubiquitous Knowledge Processing Lab;Department of Computer Science", + "aff_unique_url": "https://www.tu-darmstadt.de;https://hessian.ai", + "aff_unique_abbr": "TU Darmstadt;hessian.AI", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0;0+0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.731", + "title": "The \u201cProblem\u201d of Human Label Variation: On Ground Truth in Data, Modeling and Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Human variation in labeling is often considered noise. Annotation projects for machine learning (ML) aim at minimizing human label variation, with the assumption to maximize data quality and in turn optimize and maximize machine learning metrics. However, this conventional practice assumes that there exists a *ground truth*, and neglects that there exists genuine human variation in labeling due to disagreement, subjectivity in annotation or multiple plausible answers. In this position paper, we argue that this big open problem of human label variation persists and critically needs more attention to move our field forward. This is because human label variation impacts all stages of the ML pipeline: *data, modeling and evaluation*. However, few works consider all of these dimensions jointly; and existing research is fragmented. 
We reconcile different previously proposed notions of human label variation, provide a repository of publicly-available datasets with un-aggregated labels, depict approaches proposed so far, identify gaps and suggest ways forward. As datasets are becoming increasingly available, we hope that this synthesized view on the \u201cproblem\u201d will lead to an open discussion on possible strategies to devise fundamentally new directions.", + "author": "Barbara Plank", + "authorids": "/b/barbara-plank/", + "bibtex": "@inproceedings{plank-2022-problem,\n title = \"The {\\textquotedblleft}Problem{\\textquotedblright} of Human Label Variation: On Ground Truth in Data, Modeling and Evaluation\",\n author = \"Plank, Barbara\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.731/\",\n doi = \"10.18653/v1/2022.emnlp-main.731\",\n pages = \"10671--10682\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.731.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.731/", + "pdf_size": 438141, + "gs_citation": 212, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10614249815767236121&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Center for Information and Language Processing (CIS), MaiNLP lab, LMU Munich, Germany+Munich Center for Machine Learning (MCML), Munich, Germany", + "aff_domain": "lmu.de", + "email": "lmu.de", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0+1", + "aff_unique_norm": "LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Center for Information and Language Processing (CIS);", + "aff_unique_url": "https://www.lmu.de;", + "aff_unique_abbr": "LMU;MCML", 
+ "aff_campus_unique_index": "0+0", + "aff_campus_unique": "Munich", + "aff_country_unique_index": "0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.123", + "title": "There Is No Standard Answer: Knowledge-Grounded Dialogue Generation with Adversarial Activated Multi-Reference Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge-grounded dialogue (KGC) shows excellent potential to deliver an engaging and informative response. However, existing approaches emphasize selecting one golden knowledge given a particular dialogue context, overlooking the one-to-many phenomenon in dialogue. As a result, existing paradigm limits the diversity of knowledge selection and generation. To this end, we establish a multi-reference KGC dataset and propose a series of metrics to systematically assess the one-to-many efficacy of existing KGC models. Furthermore, to extend the hypothesis space of knowledge selection to enhance the mapping relationship between multiple knowledge and multiple responses, we devise a span-based variational model and optimize the model in a wake-sleep style with an ameliorated evidence lower bound objective to learn the one-to-many generalization. 
Both automatic and human evaluations demonstrate the efficacy of our approach.", + "author": "Xueliang Zhao; Tingchen Fu; Chongyang Tao; Rui Yan", + "authorids": "/x/xueliang-zhao/; /t/tingchen-fu/; /c/chongyang-tao/; /r/rui-yan/", + "bibtex": "@inproceedings{zhao-etal-2022-standard,\n title = \"There Is No Standard Answer: Knowledge-Grounded Dialogue Generation with Adversarial Activated Multi-Reference Learning\",\n author = \"Zhao, Xueliang and\n Fu, Tingchen and\n Tao, Chongyang and\n Yan, Rui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.123/\",\n doi = \"10.18653/v1/2022.emnlp-main.123\",\n pages = \"1878--1891\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.123.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.123/", + "pdf_size": 439379, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6767964171870548308&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Wangxuan Institute of Computer Technology, Peking University; Gaoling School of Artificial Intelligence, Renmin University of China; Microsoft Corporation; Gaoling School of Artificial Intelligence, Renmin University of China", + "aff_domain": "gmail.com;gmail.com;gmail.com;ruc.edu.cn", + "email": "gmail.com;gmail.com;gmail.com;ruc.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;1", + "aff_unique_norm": "Peking University;Renmin University of China;Microsoft Corporation", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;Gaoling School of Artificial Intelligence;", + "aff_unique_url": "http://www.pku.edu.cn;http://www.ruc.edu.cn;https://www.microsoft.com", + 
"aff_unique_abbr": "PKU;RUC;Microsoft", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.226", + "title": "Think Beyond Words: Exploring Context-Relevant Visual Commonsense for Diverse Dialogue Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Commonsense knowledge has been widely considered for building intelligent open-domain dialogue agents, aiming to generate meaningful and diverse responses. Previous works in this field usually lack the ability to effectively obtain and utilize auxiliary commonsense from the external visual world. In this paper, we argue that exploiting logical information in images related to context can be effective to enrich and steer the generation process. In view of this, we propose VICTOR, a context-relevant VIsual Commonsense enhanced dialogue generaTOR for generating coherent and informative responses. To obtain the associated visual commonsense, we devise a novel approach that expands topic words on the knowledge graph and maps them into daily scenarios. During the generation, the model adopts multimodal fusion mechanism to integrate visual and textual information, and adaptively combine their decoding distributions for better response generation. 
The experimental results on two public datasets show that our proposed method outperforms the latest competitive methods in terms of coherence and diversity.", + "author": "Yiting Liu; Liang Li; Beichen Zhang; Qingming Huang", + "authorids": "/y/yiting-liu/; /l/liang-li/; /b/beichen-zhang/; /q/qingming-huang/", + "bibtex": "@inproceedings{liu-etal-2022-think,\n title = \"Think Beyond Words: Exploring Context-Relevant Visual Commonsense for Diverse Dialogue Generation\",\n author = \"Liu, Yiting and\n Li, Liang and\n Zhang, Beichen and\n Huang, Qingming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.226/\",\n doi = \"10.18653/v1/2022.findings-emnlp.226\",\n pages = \"3106--3117\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.226.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.226/", + "pdf_size": 1963343, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6601244020992924645&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS); University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences", + "aff_domain": "ict.ac.cn;ict.ac.cn;vipl.ict.ac.cn;ucas.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;vipl.ict.ac.cn;ucas.ac.cn", + "github": "", + 
"project": "", + "author_num": 4, + "aff_unique_index": "0+1;0;1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.329", + "title": "Thinking about GPT-3 In-Context Learning for Biomedical IE? Think Again", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large pre-trained language models (PLMs) such as GPT-3 have shown strong in-context learning capabilities, which are highly appealing for domains such as biomedicine that feature high and diverse demands of language technologies but also high data annotation costs. In this paper, we present the first systematic and comprehensive study to compare the few-shot performance of GPT-3 in-context learning with fine-tuning smaller (i.e., BERT-sized) PLMs on two representative biomedical information extraction (IE) tasks: named entity recognition and relation extraction. We follow the true few-shot setting to avoid overestimating models\u2019 few-shot performance by model selection over a large validation set. We also optimize GPT-3\u2019s performance with known techniques such as contextual calibration and dynamic in-context example retrieval. However, our results show that GPT-3 still significantly underperforms compared to simply fine-tuning a smaller PLM. In addition, GPT-3 in-context learning also yields smaller gains in accuracy when more training data becomes available. More in-depth analyses further reveal issues of in-context learning that may be detrimental to IE tasks in general. 
Given the high cost of experimenting with GPT-3, we hope our study provides helpful guidance for biomedical researchers and practitioners towards more practical solutions such as fine-tuning small PLMs before better in-context learning is available for biomedical IE.", + "author": "Bernal Jimenez Gutierrez; Nikolas McNeal; Clayton Washington; You Chen; Lang Li; Huan Sun; Yu Su", + "authorids": "/b/bernal-jimenez-gutierrez/; /n/nikolas-mcneal/; /c/clayton-washington/; /y/you-chen/; /l/lang-li/; /h/huan-sun/; /y/yu-su/", + "bibtex": "@inproceedings{jimenez-gutierrez-etal-2022-thinking,\n title = \"Thinking about {GPT}-3 In-Context Learning for Biomedical {IE}? Think Again\",\n author = \"Jimenez Gutierrez, Bernal and\n McNeal, Nikolas and\n Washington, Clayton and\n Chen, You and\n Li, Lang and\n Sun, Huan and\n Su, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.329/\",\n doi = \"10.18653/v1/2022.findings-emnlp.329\",\n pages = \"4497--4512\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.329.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.329/", + "pdf_size": 1233627, + "gs_citation": 162, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9015008576735138115&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "The Ohio State University; The Ohio State University; The Ohio State University; Vanderbilt University; The Ohio State University; The Ohio State University; The Ohio State University", + "aff_domain": "osu.edu;osu.edu;osu.edu;vumc.org;osumc.edu;osu.edu;osu.edu", + "email": "osu.edu;osu.edu;osu.edu;vumc.org;osumc.edu;osu.edu;osu.edu", + "github": "https://github.com/dki-lab/few-shot-bioIE", + 
"project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;0;0", + "aff_unique_norm": "The Ohio State University;Vanderbilt University", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.osu.edu;https://www.vanderbilt.edu", + "aff_unique_abbr": "OSU;Vanderbilt", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.228", + "title": "Third-Party Aligner for Neural Word Alignments", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word alignment is to find translationally equivalent words between source and target sentences. Previous work has demonstrated that self-training can achieve competitive word alignment results. In this paper, we propose to use word alignments generated by a third-party word aligner to supervise the neural word alignment training. Specifically, source word and target word of each word pair aligned by the third-party aligner are trained to be close neighbors to each other in the contextualized embedding space when fine-tuning a pre-trained cross-lingual language model. Experiments on the benchmarks of various language pairs show that our approach can surprisingly do self-correction over the third-party supervision by finding more accurate word alignments and deleting wrong word alignments, leading to better performance than various third-party word aligners, including the currently best one. 
When we integrate all supervisions from various third-party aligners, we achieve state-of-the-art word alignment performances, with averagely more than two points lower alignment error rates than the best third-party aligner.We released our code at https://github.com/sdongchuanqi/Third-Party-Supervised-Aligner.", + "author": "Jinpeng Zhang; Chuanqi Dong; Xiangyu Duan; Yuqi Zhang; Min Zhang", + "authorids": "/j/jinpeng-zhang/; /c/chuanqi-dong/; /x/xiangyu-duan/; /y/yuqi-zhang/; /m/min-zhang/", + "bibtex": "@inproceedings{zhang-etal-2022-third,\n title = \"Third-Party Aligner for Neural Word Alignments\",\n author = \"Zhang, Jinpeng and\n Dong, Chuanqi and\n Duan, Xiangyu and\n Zhang, Yuqi and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.228/\",\n doi = \"10.18653/v1/2022.findings-emnlp.228\",\n pages = \"3134--3145\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.228.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.228/", + "pdf_size": 995935, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:AIMzdyOLqMMJ:scholar.google.com/&scioq=Third-Party+Aligner+for+Neural+Word+Alignments&hl=en&as_sdt=0,23", + "gs_version_total": 3, + "aff": "Institute of Aritificial Intelligence, School of Computer Science and Technology, Soochow University; Institute of Aritificial Intelligence, School of Computer Science and Technology, Soochow University; Institute of Aritificial Intelligence, School of Computer Science and Technology, Soochow University; Alibaba DAMO Academy; Institute of Aritificial Intelligence, School of Computer Science and Technology, Soochow University", + "aff_domain": 
"stu.suda.edu.cn;stu.suda.edu.cn;suda.edu.cn;alibaba-inc.com;suda.edu.cn", + "email": "stu.suda.edu.cn;stu.suda.edu.cn;suda.edu.cn;alibaba-inc.com;suda.edu.cn", + "github": "https://github.com/sdongchuanqi/Third-Party-Supervised-Aligner", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Soochow University;Alibaba Group", + "aff_unique_dep": "Institute of Artificial Intelligence, School of Computer Science and Technology;DAMO Academy", + "aff_unique_url": "https://www.soochow.edu.cn;https://www.alibaba-group.com", + "aff_unique_abbr": "Soochow U;Alibaba DAMO", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.791", + "title": "Three Real-World Datasets and Neural Computational Models for Classification Tasks in Patent Landscaping", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Patent Landscaping, one of the central tasks of intellectual property management, includes selecting and grouping patents according to user-defined technical or application-oriented criteria. While recent transformer-based models have been shown to be effective for classifying patents into taxonomies such as CPC or IPC, there is yet little research on how to support real-world Patent Landscape Studies (PLSs) using natural language processing methods. With this paper, we release three labeled datasets for PLS-oriented classification tasks covering two diverse domains. We provide a qualitative analysis and report detailed corpus statistics.Most research on neural models for patents has been restricted to leveraging titles and abstracts. We compare strong neural and non-neural baselines, proposing a novel model that takes into account textual information from the patents\u2019 full texts as well as embeddings created based on the patents\u2019 CPC labels. 
We find that for PLS-oriented classification tasks, going beyond title and abstract is crucial, CPC labels are an effective source of information, and combining all features yields the best results.", + "author": "Subhash Pujari; Jannik Str\u00f6tgen; Mark Giereth; Michael Gertz; Annemarie Friedrich", + "authorids": "/s/subhash-pujari/; /j/jannik-strotgen/; /m/mark-giereth/; /m/michael-gertz/; /a/annemarie-friedrich/", + "bibtex": "@inproceedings{pujari-etal-2022-three,\n title = \"Three Real-World Datasets and Neural Computational Models for Classification Tasks in Patent Landscaping\",\n author = {Pujari, Subhash and\n Str{\\\"o}tgen, Jannik and\n Giereth, Mark and\n Gertz, Michael and\n Friedrich, Annemarie},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.791/\",\n doi = \"10.18653/v1/2022.emnlp-main.791\",\n pages = \"11498--11513\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.791.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.791/", + "pdf_size": 685256, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=553065645901852326&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Bosch Center for Artificial Intelligence, Renningen, Germany + Institute of Computer Science, Heidelberg University, Heidelberg, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany; Robert Bosch GmbH, Stuttgart, Germany; Institute of Computer Science, Heidelberg University, Heidelberg, Germany; Bosch Center for Artificial Intelligence, Renningen, Germany", + "aff_domain": "de.bosch.com;de.bosch.com;de.bosch.com;informatik.uni-heidelberg.de;de.bosch.com", + "email": 
"de.bosch.com;de.bosch.com;de.bosch.com;informatik.uni-heidelberg.de;de.bosch.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;0;2;1;0", + "aff_unique_norm": "Bosch Center for Artificial Intelligence;Heidelberg University;Robert Bosch GmbH", + "aff_unique_dep": "Artificial Intelligence;Institute of Computer Science;", + "aff_unique_url": "https://www.bosch-ai.com;https://www.uni-heidelberg.de;https://www.bosch.com", + "aff_unique_abbr": "BCAI;Uni HD;Bosch", + "aff_campus_unique_index": "0+1;0;2;1;0", + "aff_campus_unique": "Renningen;Heidelberg;Stuttgart", + "aff_country_unique_index": "0+0;0;0;0;0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.535", + "title": "Time-aware Prompting for Text Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In this paper, we study the effects of incorporating timestamps, such as document creation dates, into generation systems. Two types of time-aware prompts are investigated: (1) textual prompts that encode document timestamps in natural language sentences; and (2) linear prompts that convert timestamps into continuous vectors. 
To explore extrapolation to future data points, we further introduce a new data-to-text generation dataset, TempWikiBio, containing more than 4 millions of chronologically ordered revisions of biographical articles from English Wikipedia, each paired with structured personal profiles.Through data-to-text generation on TempWikiBio, text-to-text generation on the content transfer dataset, and summarization on XSum,we show that linear prompts on encoder and textual prompts improve the generation quality on all datasets.Despite having less performance drop when testing on data drawn from a later time, linear prompts focus more on non-temporal information and are less sensitive to the given timestamps, according to human evaluations and sensitivity analyses.Meanwhile, textual prompts establish the association between the given timestamps and the output dates, yielding more factual temporal information in the output.", + "author": "Shuyang Cao; Lu Wang", + "authorids": "/s/shuyang-cao/; /l/lu-wang/", + "bibtex": "@inproceedings{cao-wang-2022-time,\n title = \"Time-aware Prompting for Text Generation\",\n author = \"Cao, Shuyang and\n Wang, Lu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.535/\",\n doi = \"10.18653/v1/2022.findings-emnlp.535\",\n pages = \"7231--7246\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.535.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.535/", + "pdf_size": 467475, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13265324436643932950&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Computer Science and Engineering, University of Michigan; Computer 
Science and Engineering, University of Michigan", + "aff_domain": "umich.edu;umich.edu", + "email": "umich.edu;umich.edu", + "github": "https://shuyangcao.github.io/projects/temporal_prompt_generation", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of Michigan", + "aff_unique_dep": "Computer Science and Engineering", + "aff_unique_url": "https://www.umich.edu", + "aff_unique_abbr": "UM", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Ann Arbor", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.444", + "title": "Tiny-Attention Adapter: Contexts Are More Important Than the Number of Parameters", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Adapter-tuning is a paradigm that transfers a pretrained language model to downstream tasks by adding and tuning a small number of new parameters. Previously proposed adapter architectures are all feed-forward neural networks. In this paper, we investigate the effectiveness of using tiny-attention\u2014i.e., attention with extremely small per-head dimensionality\u2014as adapters. Our tiny-attention adapter learns to modify the hidden states at each position directly conditioned on the hidden states at all the other positions, which is missed by the previously proposed adapters. Moreover, we view its multiple attention heads as a mixture of experts and propose to average their weights during deployment, which further reduces its inference computation cost. On the GLUE benchmark, our tiny-attention adapter outperforms the other parameter-efficient transfer learning methods as well as full fine-tuning while only updating 0.05% of the parameters. 
On the FewGLUE benchmark, its performance is comparable to that of GPT-3 and PET.", + "author": "Hongyu Zhao; Hao Tan; Hongyuan Mei", + "authorids": "/h/hongyu-zhao/; /h/hao-tan/; /h/hongyuan-mei/", + "bibtex": "@inproceedings{zhao-etal-2022-tiny,\n title = \"Tiny-Attention Adapter: Contexts Are More Important Than the Number of Parameters\",\n author = \"Zhao, Hongyu and\n Tan, Hao and\n Mei, Hongyuan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.444/\",\n doi = \"10.18653/v1/2022.emnlp-main.444\",\n pages = \"6626--6638\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.444.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.444/", + "pdf_size": 889655, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4892664699573028506&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "University of Chicago + TTI-Chicago; Adobe Research; TTI-Chicago", + "aff_domain": "uchicago.edu;adobe.com;ttic.edu", + "email": "uchicago.edu;adobe.com;ttic.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;2;1", + "aff_unique_norm": "University of Chicago;Toyota Technological Institute at Chicago;Adobe", + "aff_unique_dep": ";;Adobe Research", + "aff_unique_url": "https://www.uchicago.edu;https://www.tti-chicago.org;https://research.adobe.com", + "aff_unique_abbr": "UChicago;TTI;Adobe", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Chicago", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.368", + "title": "Tiny-NewsRec: Effective and Efficient PLM-based News Recommendation", + "track": 
"main", + "status": "Main", + "award": false, + "abstract": "News recommendation is a widely adopted technique to provide personalized news feeds for the user. Recently, pre-trained language models (PLMs) have demonstrated the great capability of natural language understanding and benefited news recommendation via improving news modeling. However, most existing works simply finetune the PLM with the news recommendation task, which may suffer from the known domain shift problem between the pre-training corpus and downstream news texts. Moreover, PLMs usually contain a large volume of parameters and have high computational overhead, which imposes a great burden on low-latency online services. In this paper, we propose Tiny-NewsRec, which can improve both the effectiveness and the efficiency of PLM-based news recommendation. We first design a self-supervised domain-specific post-training method to better adapt the general PLM to the news domain with a contrastive matching task between news titles and news bodies. We further propose a two-stage knowledge distillation method to improve the efficiency of the large PLM-based news recommendation model while maintaining its performance. Multiple teacher models originated from different time steps of our post-training procedure are used to transfer comprehensive knowledge to the student model in both its post-training stage and finetuning stage. 
Extensive experiments on two real-world datasets validate the effectiveness and efficiency of our method.", + "author": "Yang Yu; Fangzhao Wu; Chuhan Wu; Jingwei Yi; Qi Liu", + "authorids": "/y/yang-yu/; /f/fangzhao-wu/; /c/chuhan-wu/; /j/jingwei-yi/; /q/qi-liu/", + "bibtex": "@inproceedings{yu-etal-2022-tiny,\n title = \"Tiny-{N}ews{R}ec: Effective and Efficient {PLM}-based News Recommendation\",\n author = \"Yu, Yang and\n Wu, Fangzhao and\n Wu, Chuhan and\n Yi, Jingwei and\n Liu, Qi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.368/\",\n doi = \"10.18653/v1/2022.emnlp-main.368\",\n pages = \"5478--5489\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.368.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.368/", + "pdf_size": 1455406, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7802794324648191423&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Science and Technology of China; Microsoft Research Asia; Tsinghua University; University of Science and Technology of China; University of Science and Technology of China", + "aff_domain": "mail.ustc.edu.cn;gmail.com;gmail.com;mail.ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;gmail.com;gmail.com;mail.ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/yflyl613/Tiny-NewsRec", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;0", + "aff_unique_norm": "University of Science and Technology of China;Microsoft Research;Tsinghua University", + "aff_unique_dep": ";Research;", + "aff_unique_url": 
"http://www.ustc.edu.cn;https://www.microsoft.com/en-us/research/group/asia;https://www.tsinghua.edu.cn", + "aff_unique_abbr": "USTC;MSR Asia;THU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.437", + "title": "Title2Event: Benchmarking Open Event Extraction with a Large-scale Chinese Title Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Event extraction (EE) is crucial to downstream tasks such as new aggregation and event knowledge graph construction. Most existing EE datasets manually define fixed event types and design specific schema for each of them, failing to cover diverse events emerging from the online text. Moreover, news titles, an important source of event mentions, have not gained enough attention in current EE research. In this paper, we present Title2Event, a large-scale sentence-level dataset benchmarking Open Event Extraction without restricting event types. Title2Event contains more than 42,000 news titles in 34 topics collected from Chinese web pages. To the best of our knowledge, it is currently the largest manually annotated Chinese dataset for open event extraction. We further conduct experiments on Title2Event with different models and show that the characteristics of titles make it challenging for event extraction, addressing the significance of advanced study on this problem. 
The dataset and baseline codes are available at https://open-event-hub.github.io/title2event.", + "author": "Haolin Deng; Yanan Zhang; Yangfan Zhang; Wangyang Ying; Changlong Yu; Jun Gao; Wei Wang; Xiaoling Bai; Nan Yang; Jin Ma; Xiang Chen; Tianhua Zhou", + "authorids": "/h/haolin-deng/; /y/yanan-zhang/; /y/yangfan-zhang/; /w/wangyang-ying/; /c/changlong-yu/; /j/jun-gao/; /w/wei-wang/; /x/xiaoling-bai/; /n/nan-yang/; /j/jin-ma/; /x/xiang-chen/; /t/tianhua-zhou/", + "bibtex": "@inproceedings{deng-etal-2022-title2event,\n title = \"{T}itle2{E}vent: Benchmarking Open Event Extraction with a Large-scale {C}hinese Title Dataset\",\n author = \"Deng, Haolin and\n Zhang, Yanan and\n Zhang, Yangfan and\n Ying, Wangyang and\n Yu, Changlong and\n Gao, Jun and\n Wang, Wei and\n Bai, Xiaoling and\n Yang, Nan and\n Ma, Jin and\n Chen, Xiang and\n Zhou, Tianhua\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.437/\",\n doi = \"10.18653/v1/2022.emnlp-main.437\",\n pages = \"6511--6524\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.437.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.437/", + "pdf_size": 1301559, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1448545848064814853&as_sdt=5,39&sciodt=0,39&hl=en", + "gs_version_total": 5, + "aff": "Tencent; Tencent; Tencent; Tencent; HKUST; Tsinghua University; Tsinghua University; Tencent; Tencent; USTC; Tencent; Tencent", + "aff_domain": "gmail.com;tencent.com;tencent.com; ; ; ; ; ; ; ; ; ", + "email": "gmail.com;tencent.com;tencent.com; ; ; ; ; ; ; ; ; ", + "github": "https://open-event-hub.github.io/title2event", + "project": "", + "author_num": 12, + 
"aff_unique_index": "0;0;0;0;1;2;2;0;0;3;0;0", + "aff_unique_norm": "Tencent Holdings Limited;Hong Kong University of Science and Technology;Tsinghua University;University of Science and Technology of China", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.tencent.com;https://www.ust.hk;https://www.tsinghua.edu.cn;https://www.ustc.edu.cn", + "aff_unique_abbr": "Tencent;HKUST;THU;USTC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.136", + "title": "ToKen: Task Decomposition and Knowledge Infusion for Few-Shot Hate Speech Detection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Hate speech detection is complex; it relies on commonsense reasoning, knowledge of stereotypes, and an understanding of social nuance that differs from one culture to the next. It is also difficult to collect a large-scale hate speech annotated dataset. In this work, we frame this problem as a few-shot learning task, and show significant gains with decomposing the task into its \u201cconstituent\u201d parts. In addition, we see that infusing knowledge from reasoning datasets (e.g. ATOMIC2020) improves the performance even further. Moreover, we observe that the trained models generalize to out-of-distribution datasets, showing the superiority of task decomposition and knowledge infusion compared to previously used methods. 
Concretely, our method outperforms the baseline by 17.83% absolute gain in the 16-shot case.", + "author": "Badr AlKhamissi; Faisal Ladhak; Srinivasan Iyer; Veselin Stoyanov; Zornitsa Kozareva; Xian Li; Pascale Fung; Lambert Mathias; Asli Celikyilmaz; Mona Diab", + "authorids": "/b/badr-alkhamissi/; /f/faisal-ladhak/; /s/srinivasan-iyer/; /v/veselin-stoyanov/; /z/zornitsa-kozareva/; /x/xian-li/; /p/pascale-fung/; /l/lambert-mathias/; /a/asli-celikyilmaz/; /m/mona-diab/", + "bibtex": "@inproceedings{alkhamissi-etal-2022-token,\n title = \"{T}o{K}en: Task Decomposition and Knowledge Infusion for Few-Shot Hate Speech Detection\",\n author = \"AlKhamissi, Badr and\n Ladhak, Faisal and\n Iyer, Srinivasan and\n Stoyanov, Veselin and\n Kozareva, Zornitsa and\n Li, Xian and\n Fung, Pascale and\n Mathias, Lambert and\n Celikyilmaz, Asli and\n Diab, Mona\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.136/\",\n doi = \"10.18653/v1/2022.emnlp-main.136\",\n pages = \"2109--2120\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.136.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.136/", + "pdf_size": 447442, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13390938631983657152&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": ";;;;;;;;;", + "email": ";;;;;;;;;", + "github": "", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + "aff_unique_url": "https://meta.com", + 
"aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.396", + "title": "Token-level Sequence Labeling for Spoken Language Understanding using Compositional End-to-End Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "End-to-end spoken language understanding (SLU) systems are gaining popularity over cascaded approaches due to their simplicity and ability to avoid error propagation. However, these systems model sequence labeling as a sequence prediction task causing a divergence from its well-established token-level tagging formulation. We build compositional end-to-end SLU systems that explicitly separate the added complexity of recognizing spoken mentions in SLU from the NLU task of sequence labeling. By relying on intermediate decoders trained for ASR, our end-to-end systems transform the input modality from speech to token-level representations that can be used in the traditional sequence labeling framework. This composition of ASR and NLU formulations in our end-to-end SLU system offers direct compatibility with pre-trained ASR and NLU systems, allows performance monitoring of individual components and enables the use of globally normalized losses like CRF, making them attractive in practical scenarios. 
Our models outperform both cascaded and direct end-to-end models on a labeling task of named entity recognition across SLU benchmarks.", + "author": "Siddhant Arora; Siddharth Dalmia; Brian Yan; Florian Metze; Alan W Black; Shinji Watanabe", + "authorids": "/s/siddhant-arora/; /s/siddharth-dalmia/; /b/brian-yan/; /f/florian-metze/; /a/alan-w-black/; /s/shinji-watanabe/", + "bibtex": "@inproceedings{arora-etal-2022-token,\n title = \"Token-level Sequence Labeling for Spoken Language Understanding using Compositional End-to-End Models\",\n author = \"Arora, Siddhant and\n Dalmia, Siddharth and\n Yan, Brian and\n Metze, Florian and\n Black, Alan W and\n Watanabe, Shinji\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.396/\",\n doi = \"10.18653/v1/2022.findings-emnlp.396\",\n pages = \"5419--5429\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.396.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.396/", + "pdf_size": 730389, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=948027656636841473&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA; Language Technologies Institute, Carnegie Mellon University, USA", + "aff_domain": "cs.cmu.edu;cs.cmu.edu; ; ; ; ", + "email": "cs.cmu.edu;cs.cmu.edu; ; ; ; ", + "github": "https://github.com/espnet/espnet", + "project": 
"", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.20", + "title": "Tomayto, Tomahto. Beyond Token-level Answer Equivalence for Question Answering Evaluation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The predictions of question answering (QA) systems are typically evaluated against manually annotated finite sets of one or more answers. This leads to a coverage limitation that results in underestimating the true performance of systems, and is typically addressed by extending over exact match (EM) with predefined rules or with the token-level F1 measure.In this paper, we present the first systematic conceptual and data-driven analysis to examine the shortcomings of token-level equivalence measures.To this end, we define the asymmetric notion of answer equivalence (AE), accepting answers that are equivalent to or improve over the reference, and publish over 23k human judgements for candidates produced by multiple QA systems on SQuAD.Through a careful analysis of this data, we reveal and quantify several concrete limitations of the F1 measure, such as a false impression of graduality, or missing dependence on the question.Since collecting AE annotations for each evaluated model is expensive, we learn a BERT matching (BEM) measure to approximate this task. 
Being a simpler task than QA, we find BEM to provide significantly better AE approximations than F1, and to more accurately reflect the performance of systems.Finally, we demonstrate the practical utility of AE and BEM on the concrete application of minimal accurate prediction sets, reducing the number of required answers by up to X2.6.", + "author": "Jannis Bulian; Christian Buck; Wojciech Gajewski; Benjamin B\u00f6rschinger; Tal Schuster", + "authorids": "/j/jannis-bulian/; /c/christian-buck/; /w/wojciech-gajewski/; /b/benjamin-borschinger/; /t/tal-schuster/", + "bibtex": "@inproceedings{bulian-etal-2022-tomayto,\n title = \"Tomayto, Tomahto. Beyond Token-level Answer Equivalence for Question Answering Evaluation\",\n author = {Bulian, Jannis and\n Buck, Christian and\n Gajewski, Wojciech and\n B{\\\"o}rschinger, Benjamin and\n Schuster, Tal},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.20/\",\n doi = \"10.18653/v1/2022.emnlp-main.20\",\n pages = \"291--305\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.20.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.20/", + "pdf_size": 463689, + "gs_citation": 81, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2124667473371868545&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 6, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "https://github.com/google-research-datasets/answer-equivalence-dataset", + "project": 
"https://tfhub.dev/google/answer_equivalence/bem/1", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": "Google Research", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.792", + "title": "Topic Modeling With Topological Data Analysis", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent unsupervised topic modelling ap-proaches that use clustering techniques onword, token or document embeddings can ex-tract coherent topics. A common limitationof such approaches is that they reveal noth-ing about inter-topic relationships which areessential in many real-world application do-mains. We present an unsupervised topic mod-elling method which harnesses TopologicalData Analysis (TDA) to extract a topologicalskeleton of the manifold upon which contextu-alised word embeddings lie. 
We demonstratethat our approach, which performs on par witha recent baseline, is able to construct a networkof coherent topics together with meaningfulrelationships between them.", + "author": "Ciar\u00e1n Byrne; Danijela Horak; Karo Moilanen; Amandla Mabona", + "authorids": "/c/ciaran-byrne/; /d/danijela-horak/; /k/karo-moilanen/; /a/amandla-mabona/", + "bibtex": "@inproceedings{byrne-etal-2022-topic,\n title = \"Topic Modeling With Topological Data Analysis\",\n author = \"Byrne, Ciar{\\'a}n and\n Horak, Danijela and\n Moilanen, Karo and\n Mabona, Amandla\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.792/\",\n doi = \"10.18653/v1/2022.emnlp-main.792\",\n pages = \"11514--11533\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.792.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.792/", + "pdf_size": 5236261, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11192365629628716412&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "AIG; AIG; AIG; AIG", + "aff_domain": "gmail.com;gmail.com;gmail.com;gmail.com", + "email": "gmail.com;gmail.com;gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "American International Group", + "aff_unique_dep": "", + "aff_unique_url": "https://www.aig.com", + "aff_unique_abbr": "AIG", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.65", + "title": "Topic Modeling by Clustering Language Model Embeddings: Human Validation on an Industry 
Dataset", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Topic models are powerful tools to get an overview of large collections of text data, a situation that is prevalent in industry applications. A rising trend within topic modeling is to directly cluster dimension-reduced embeddings created with pretrained language models. It is difficult to evaluate these models because there is no ground truth and automatic measurements may not mimic human judgment. To address this problem, we created a tool called STELLAR for interactive topic browsing which we used for human evaluation of topics created from a real-world dataset used in industry. Embeddings created with BERT were used together with UMAP and HDBSCAN to model the topics. The human evaluation found that our topic model creates coherent topics. The following discussion revolves around the requirements of industry and what research is needed for production-ready systems.", + "author": "Anton Eklund; Mona Forsman", + "authorids": "/a/anton-eklund/; /m/mona-forsman/", + "bibtex": "@inproceedings{eklund-forsman-2022-topic,\n title = \"Topic Modeling by Clustering Language Model Embeddings: Human Validation on an Industry Dataset\",\n author = \"Eklund, Anton and\n Forsman, Mona\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.65/\",\n doi = \"10.18653/v1/2022.emnlp-industry.65\",\n pages = \"635--643\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.65.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.65/", + "pdf_size": 819635, + "gs_citation": 19, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7643743431311068817&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Ume\u00e5 University + Adlede AB; Adlede AB", + "aff_domain": "cs.umu.se;adlede.com", + "email": "cs.umu.se;adlede.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+1;1", + "aff_unique_norm": "Ume\u00e5 University;Adlede AB", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.umu.se;", + "aff_unique_abbr": "UMU;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0", + "aff_country_unique": "Sweden" + }, + { + "id": "2022.findings-emnlp.122", + "title": "Topic Taxonomy Expansion via Hierarchy-Aware Topic Phrase Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Topic taxonomies display hierarchical topic structures of a text corpus and provide topical knowledge to enhance various NLP applications. To dynamically incorporate new topic information, several recent studies have tried to expand (or complete) a topic taxonomy by inserting emerging topics identified in a set of new documents. However, existing methods focus only on frequent terms in documents and the local topic-subtopic relations in a taxonomy, which leads to limited topic term coverage and fails to model the global taxonomy structure. In this work, we propose a novel framework for topic taxonomy expansion, named TopicExpan, which directly generates topic-related terms belonging to new topics. Specifically, TopicExpan leverages the hierarchical relation structure surrounding a new topic and the textual content of an input document for topic term generation. This approach encourages newly-inserted topics to further cover important but less frequent terms as well as to keep their relation consistency within the taxonomy. 
Experimental results on two real-world text corpora show that TopicExpan significantly outperforms other baseline methods in terms of the quality of output taxonomies.", + "author": "Dongha Lee; Jiaming Shen; Seonghyeon Lee; Susik Yoon; Hwanjo Yu; Jiawei Han", + "authorids": "/d/dongha-lee/; /j/jiaming-shen/; /s/seonghyeon-lee/; /s/susik-yoon/; /h/hwanjo-yu/; /j/jiawei-han/", + "bibtex": "@inproceedings{lee-etal-2022-topic,\n title = \"Topic Taxonomy Expansion via Hierarchy-Aware Topic Phrase Generation\",\n author = \"Lee, Dongha and\n Shen, Jiaming and\n Lee, Seonghyeon and\n Yoon, Susik and\n Yu, Hwanjo and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.122/\",\n doi = \"10.18653/v1/2022.findings-emnlp.122\",\n pages = \"1687--1700\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.122.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.122/", + "pdf_size": 1092812, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7871246154160207360&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign (UIUC), Urbana, IL, United States; Google Research, New York, NY, United States; Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea; University of Illinois at Urbana-Champaign (UIUC), Urbana, IL, United States; Pohang University of Science and Technology (POSTECH), Pohang, Republic of Korea; University of Illinois at Urbana-Champaign (UIUC), Urbana, IL, United States", + "aff_domain": "illinois.edu;google.com;postech.ac.kr;illinois.edu;postech.ac.kr;illinois.edu", + "email": 
"illinois.edu;google.com;postech.ac.kr;illinois.edu;postech.ac.kr;illinois.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;0;2;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Google Research;Pohang University of Science and Technology", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://illinois.edu;https://research.google;https://www.postech.ac.kr", + "aff_unique_abbr": "UIUC;Google Research;POSTECH", + "aff_campus_unique_index": "0;1;2;0;2;0", + "aff_campus_unique": "Urbana;New York;Pohang", + "aff_country_unique_index": "0;0;1;0;1;0", + "aff_country_unique": "United States;Republic of Korea" + }, + { + "id": "2022.findings-emnlp.533", + "title": "Topic-Aware Response Generation in Task-Oriented Dialogue with Unstructured Knowledge Access", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To alleviate the problem of structured databases\u2019 limited coverage, recent task-oriented dialogue systems incorporate external unstructured knowledge to guide the generation of system responses. However, these usually use word or sentence level similarities to detect the relevant knowledge context, which only partially capture the topical level relevance. In this paper, we examine how to better integrate topical information in knowledge grounded task-oriented dialogue and propose \u201cTopic-Aware Response Generation\u201d (TARG), an end-to-end response generation model. TARG incorporates multiple topic-aware attention mechanisms to derive the importance weighting scheme over dialogue utterances and external knowledge sources towards a better understanding of the dialogue history. 
Experimental results indicate that TARG achieves state-of-the-art performance in knowledge selection and response generation, outperforming previous state-of-the-art by 3.2, 3.6, and 4.2 points in EM, F1 and BLEU-4 respectively on Doc2Dial, and performing comparably with previous work on DSTC9; both being knowledge-grounded task-oriented dialogue datasets.", + "author": "Yue Feng; Gerasimos Lampouras; Ignacio Iacobacci", + "authorids": "/y/yue-feng/; /g/gerasimos-lampouras/; /i/ignacio-iacobacci/", + "bibtex": "@inproceedings{feng-etal-2022-topic,\n title = \"Topic-Aware Response Generation in Task-Oriented Dialogue with Unstructured Knowledge Access\",\n author = \"Feng, Yue and\n Lampouras, Gerasimos and\n Iacobacci, Ignacio\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.533/\",\n doi = \"10.18653/v1/2022.findings-emnlp.533\",\n pages = \"7199--7211\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.533.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.533/", + "pdf_size": 556763, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9861146922107244177&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "University College London, London, UK + Huawei Noah\u2019s Ark Lab, London, UK; Huawei Noah\u2019s Ark Lab, London, UK; Huawei Noah\u2019s Ark Lab, London, UK", + "aff_domain": "ucl.ac.uk;huawei.com;huawei.com", + "email": "ucl.ac.uk;huawei.com;huawei.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;1;1", + "aff_unique_norm": "University College London;Huawei Noah\u2019s Ark Lab", + "aff_unique_dep": ";", + "aff_unique_url": 
"https://www.ucl.ac.uk;https://www.huawei.com/en/ai", + "aff_unique_abbr": "UCL;HNA Lab", + "aff_campus_unique_index": "0+0;0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0+0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.70", + "title": "Topic-Regularized Authorship Representation Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Authorship attribution is a task that aims to identify the author of a given piece of writing. We aim to develop a generalized solution that can handle a large number of texts from authors and topics unavailable in training data. Previous studies have proposed strategies to address only either unseen authors or unseen topics. Authorship representation learning has been shown to work in open-set environments with a large number of unseen authors but has not been explicitly designed for cross-topic environments at the same time. To handle a large number of unseen authors and topics, we propose Authorship Representation Regularization (ARR), a distillation framework that creates authorship representation with reduced reliance on topic-specific information. To assess the performance of our framework, we also propose a cross-topic-open-set evaluation method. 
Our proposed method has improved performances in the cross-topic-open set setup over baselines in 4 out of 6 cases.", + "author": "Jitkapat Sawatphol; Nonthakit Chaiwong; Can Udomcharoenchaikit; Sarana Nutanong", + "authorids": "/j/jitkapat-sawatphol/; /n/nonthakit-chaiwong/; /c/can-udomcharoenchaikit/; /s/sarana-nutanong/", + "bibtex": "@inproceedings{sawatphol-etal-2022-topic,\n title = \"Topic-Regularized Authorship Representation Learning\",\n author = \"Sawatphol, Jitkapat and\n Chaiwong, Nonthakit and\n Udomcharoenchaikit, Can and\n Nutanong, Sarana\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.70/\",\n doi = \"10.18653/v1/2022.emnlp-main.70\",\n pages = \"1076--1082\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.70.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.70/", + "pdf_size": 378194, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8233574053499925111&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.457", + "title": "Topical Segmentation of Spoken Narratives: A Test Case on Holocaust Survivor Testimonies", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The task of topical segmentation is well studied, but previous work has mostly addressed it in the context of structured, well-defined segments, such as segmentation into paragraphs, chapters, or segmenting text that originated from multiple sources. 
We tackle the task of segmenting running (spoken) narratives, which poses hitherto unaddressed challenges. As a test case, we address Holocaust survivor testimonies, given in English. Other than the importance of studying these testimonies for Holocaust research, we argue that they provide an interesting test case for topical segmentation, due to their unstructured surface level, relative abundance (tens of thousands of such testimonies were collected), and the relatively confined domain that they cover. We hypothesize that boundary points between segments correspond to low mutual information between the sentences proceeding and following the boundary. Based on this hypothesis, we explore a range of algorithmic approaches to the task, building on previous work on segmentation that uses generative Bayesian modeling and state-of-the-art neural machinery. Compared to manually annotated references, we find that the developed approaches show considerable improvements over previous work.", + "author": "Eitan Wagner; Renana Keydar; Amit Pinchevski; Omri Abend", + "authorids": "/e/eitan-wagner/; /r/renana-keydar/; /a/amit-pinchevski/; /o/omri-abend/", + "bibtex": "@inproceedings{wagner-etal-2022-topical,\n title = \"Topical Segmentation of Spoken Narratives: A Test Case on Holocaust Survivor Testimonies\",\n author = \"Wagner, Eitan and\n Keydar, Renana and\n Pinchevski, Amit and\n Abend, Omri\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.457/\",\n doi = \"10.18653/v1/2022.emnlp-main.457\",\n pages = \"6809--6821\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.457.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.457/", + 
"pdf_size": 340049, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16933194198188648811&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science; Faculty of Law and Digital Humanities; Department of Communication and Journalism; Department of Computer Science", + "aff_domain": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "email": "mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il;mail.huji.ac.il", + "github": "https://github.com/eitanwagner/holocaust-segmentation", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;2;0", + "aff_unique_norm": "Unknown Institution;Faculty of Law and Digital Humanities;University Affiliation", + "aff_unique_dep": "Department of Computer Science;Law and Digital Humanities;Department of Communication and Journalism", + "aff_unique_url": ";;", + "aff_unique_abbr": ";;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.8", + "title": "Toward Unifying Text Segmentation and Long Document Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text segmentation is important for signaling a document\u2019s structure. Without segmenting a long document into topically coherent sections, it is difficult for readers to comprehend the text, let alone find important information. The problem is only exacerbated by a lack of segmentation in transcripts of audio/video recordings. In this paper, we explore the role that section segmentation plays in extractive summarization of written and spoken documents. Our approach learns robust sentence representations by performing summarization and segmentation simultaneously, which is further enhanced by an optimization-based regularizer to promote selection of diverse summary sentences. 
We conduct experiments on multiple datasets ranging from scientific articles to spoken transcripts to evaluate the model\u2019s performance. Our findings suggest that the model can not only achieve state-of-the-art performance on publicly available benchmarks, but demonstrate better cross-genre transferability when equipped with text segmentation. We perform a series of analyses to quantify the impact of section segmentation on summarizing written and spoken documents of substantial length and complexity.", + "author": "Sangwoo Cho; Kaiqiang Song; Xiaoyang Wang; Fei Liu; Dong Yu", + "authorids": "/s/sangwoo-cho/; /k/kaiqiang-song/; /x/xiaoyang-wang/; /f/fei-liu/; /d/dong-yu/", + "bibtex": "@inproceedings{cho-etal-2022-toward,\n title = \"Toward Unifying Text Segmentation and Long Document Summarization\",\n author = \"Cho, Sangwoo and\n Song, Kaiqiang and\n Wang, Xiaoyang and\n Liu, Fei and\n Yu, Dong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.8/\",\n doi = \"10.18653/v1/2022.emnlp-main.8\",\n pages = \"106--118\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.8.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.8/", + "pdf_size": 885240, + "gs_citation": 42, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10603092825533455327&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Tencent AI Lab, Bellevue, WA; Tencent AI Lab, Bellevue, WA; Tencent AI Lab, Bellevue, WA; Department of Computer Science, Emory University, Atlanta, GA; Tencent AI Lab, Bellevue, WA", + "aff_domain": "global.tencent.com;global.tencent.com;global.tencent.com;emory.edu;global.tencent.com", + "email": 
"global.tencent.com;global.tencent.com;global.tencent.com;emory.edu;global.tencent.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;0", + "aff_unique_norm": "Tencent;Emory University", + "aff_unique_dep": "AI Lab;Department of Computer Science", + "aff_unique_url": "https://ai.tencent.com;https://www.emory.edu", + "aff_unique_abbr": "Tencent AI Lab;Emory", + "aff_campus_unique_index": "0;0;0;1;0", + "aff_campus_unique": "Bellevue;Atlanta", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.400", + "title": "Toward the Limitation of Code-Switching in Cross-Lingual Transfer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multilingual pretrained models have shown strong cross-lingual transfer ability. Some works used code-switching sentences, which consist of tokens from multiple languages, to enhance the cross-lingual representation further, and have shown success in many zero-shot cross-lingual tasks. However, code-switched tokens are likely to cause grammatical incoherence in newly substituted sentences, and negatively affect the performance on token-sensitive tasks, such as Part-of-Speech (POS) tagging and Named-Entity-Recognition (NER). This paper mitigates the limitation of the code-switching method by not only making the token replacement but considering the similarity between the context and the switched tokens so that the newly substituted sentences are grammatically consistent during both training and inference. 
We conduct experiments on cross-lingual POS and NER over 30+ languages, and demonstrate the effectiveness of our method by outperforming the mBERT by 0.95 and original code-switching method by 1.67 on F1 scores.", + "author": "Yukun Feng; Feng Li; Philipp Koehn", + "authorids": "/y/yukun-feng/; /f/feng-li/; /p/philipp-koehn/", + "bibtex": "@inproceedings{feng-etal-2022-toward,\n title = \"Toward the Limitation of Code-Switching in Cross-Lingual Transfer\",\n author = \"Feng, Yukun and\n Li, Feng and\n Koehn, Philipp\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.400/\",\n doi = \"10.18653/v1/2022.emnlp-main.400\",\n pages = \"5966--5971\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.400.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.400/", + "pdf_size": 298275, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15980889074216490825&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Johns Hopkins University; University of Illinois Urbana-Champaign; Johns Hopkins University", + "aff_domain": "jhu.edu;illinois.edu;jhu.edu", + "email": "jhu.edu;illinois.edu;jhu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Johns Hopkins University;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.jhu.edu;https://illinois.edu", + "aff_unique_abbr": "JHU;UIUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.568", + "title": "Towards 
Better Document-level Relation Extraction via Iterative Inference", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Document-level relation extraction (RE) aims to extract the relations between entities from the input document that usually containing many difficultly-predicted entity pairs whose relations can only be predicted through relational inference. Existing methods usually directly predict the relations of all entity pairs of input document in a one-pass manner, ignoring the fact that predictions of some entity pairs heavily depend on the predicted results of other pairs. To deal with this issue, in this paper, we propose a novel document-level RE model with iterative inference. Our model is mainly composed of two modules: 1) a base module expected to provide preliminary relation predictions on entity pairs; 2) an inference module introduced to refine these preliminary predictions by iteratively dealing with difficultly-predicted entity pairs depending on other pairs in an easy-to-hard manner. Unlike previous methods which only consider feature information of entity pairs, our inference module is equipped with two Extended Cross Attention units, allowing it to exploit both feature information and previous predictions of entity pairs during relational inference. Furthermore, we adopt a two-stage strategy to train our model. At the first stage, we only train our base module. During the second stage, we train the whole model, where contrastive learning is introduced to enhance the training of inference module. 
Experimental results on three commonly-used datasets show that our model consistently outperforms other competitive baselines.", + "author": "Liang Zhang; Jinsong Su; Yidong Chen; Zhongjian Miao; Min Zijun; Qingguo Hu; Xiaodong Shi", + "authorids": "/l/liang-zhang/; /j/jinsong-su/; /y/yidong-chen/; /z/zhongjian-miao/; /m/min-zijun/; /q/qingguo-hu/; /x/xiaodong-shi/", + "bibtex": "@inproceedings{zhang-etal-2022-towards-better,\n title = \"Towards Better Document-level Relation Extraction via Iterative Inference\",\n author = \"Zhang, Liang and\n Su, Jinsong and\n Chen, Yidong and\n Miao, Zhongjian and\n Zijun, Min and\n Hu, Qingguo and\n Shi, Xiaodong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.568/\",\n doi = \"10.18653/v1/2022.emnlp-main.568\",\n pages = \"8306--8317\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.568.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.568/", + "pdf_size": 864118, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3582763365068140463&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent 
Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China; 1School of Informatics, Xiamen University, China+2Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan (Xiamen University), Ministry of Culture and Tourism, China", + "aff_domain": "stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn; ; ; ; ", + "email": "stu.xmu.edu.cn;xmu.edu.cn;xmu.edu.cn; ; ; ; ", + "github": "https://github.com/DeepLearnXMU/DocRE-II", + "project": "", + "author_num": 7, + "aff_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_unique_norm": "Xiamen University", + "aff_unique_dep": "School of Informatics", + "aff_unique_url": "https://www.xmu.edu.cn", + "aff_unique_abbr": "", + "aff_campus_unique_index": ";;;;;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.159", + "title": "Towards Climate Awareness in NLP Research", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The climate impact of AI, and NLP research in particular, has become a serious issue given the enormous amount of energy that is increasingly being used for training and running computational models. 
Consequently, increasing focus is placed on efficient NLP. However, this important initiative lacks simple guidelines that would allow for systematic climate reporting of NLP research. We argue that this deficiency is one of the reasons why very few publications in NLP report key figures that would allow a more thorough examination of environmental impact, and present a quantitative survey to demonstrate this. As a remedy, we propose a climate performance model card with the primary purpose of being practically usable with only limited information about experiments and the underlying computer hardware. We describe why this step is essential to increase awareness about the environmental impact of NLP research and, thereby, paving the way for more thorough discussions.", + "author": "Daniel Hershcovich; Nicolas Webersinke; Mathias Kraus; Julia Bingler; Markus Leippold", + "authorids": "/d/daniel-hershcovich/; /n/nicolas-webersinke/; /m/mathias-kraus/; /j/julia-bingler/; /m/markus-leippold/", + "bibtex": "@inproceedings{hershcovich-etal-2022-towards,\n title = \"Towards Climate Awareness in {NLP} Research\",\n author = \"Hershcovich, Daniel and\n Webersinke, Nicolas and\n Kraus, Mathias and\n Bingler, Julia and\n Leippold, Markus\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.159/\",\n doi = \"10.18653/v1/2022.emnlp-main.159\",\n pages = \"2480--2494\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.159.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.159/", + "pdf_size": 960814, + "gs_citation": 41, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15635524763720561187&as_sdt=5,33&sciodt=0,33&hl=en", + 
"gs_version_total": 9, + "aff": "University of Copenhagen; FAU Erlangen-Nuremberg; FAU Erlangen-Nuremberg; ETH Zurich; University of Zurich", + "aff_domain": "di.ku.dk;fau.de;fau.de;ethz.ch;bf.uzh.ch", + "email": "di.ku.dk;fau.de;fau.de;ethz.ch;bf.uzh.ch", + "github": "https://github.com/danielhers/climate-awareness-nlp", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;2;3", + "aff_unique_norm": "University of Copenhagen;Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg;ETH Zurich;University of Zurich", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.ku.dk;https://www.fau.de;https://www.ethz.ch;https://www.unizh.ch", + "aff_unique_abbr": "UCPH;FAU;ETHZ;UZH", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;2;2", + "aff_country_unique": "Denmark;Germany;Switzerland" + }, + { + "id": "2022.emnlp-main.737", + "title": "Towards Compositional Generalization in Code Search", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We study compositional generalization, which aims to generalize on unseen combinations of seen structural elements, for code search. Unlike existing approaches of partially pursuing this goal, we study how to extract structural elements, which we name a template that directly targets compositional generalization. Thus we propose CTBERT, or Code Template BERT, representing codes using automatically extracted templates as building blocks. We empirically validate CTBERT on two public code search benchmarks, AdvTest and CSN. 
Further, we show that templates are complementary to data flow graphs in GraphCodeBERT, by enhancing structural context around variables.", + "author": "Hojae Han; Seung-won Hwang; Shuai Lu; Nan Duan; Seungtaek Choi", + "authorids": "/h/hojae-han/; /s/seung-won-hwang/; /s/shuai-lu/; /n/nan-duan/; /s/seungtaek-choi/", + "bibtex": "@inproceedings{han-etal-2022-towards,\n title = \"Towards Compositional Generalization in Code Search\",\n author = \"Han, Hojae and\n Hwang, Seung-won and\n Lu, Shuai and\n Duan, Nan and\n Choi, Seungtaek\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.737/\",\n doi = \"10.18653/v1/2022.emnlp-main.737\",\n pages = \"10743--10750\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.737.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.737/", + "pdf_size": 753731, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4922887057499815490&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 2, + "aff": "Seoul National University; Seoul National University; Microsoft Research Asia; Microsoft Research Asia; Riiid", + "aff_domain": "snu.ac.kr;snu.ac.kr;microsoft.com;microsoft.com;riiid.co", + "email": "snu.ac.kr;snu.ac.kr;microsoft.com;microsoft.com;riiid.co", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;1;2", + "aff_unique_norm": "Seoul National University;Microsoft Research;Riiid", + "aff_unique_dep": ";Microsoft Research;", + "aff_unique_url": "https://www.snu.ac.kr;https://www.microsoft.com/en-us/research/group/microsoft-research-asia;https://www.riiid.com", + "aff_unique_abbr": "SNU;MSRA;Riiid", + 
"aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0;0;1;1;0", + "aff_country_unique": "South Korea;China" + }, + { + "id": "2022.emnlp-main.683", + "title": "Towards Efficient Dialogue Pre-training with Transferable and Interpretable Latent Structure", + "track": "main", + "status": "Main", + "award": false, + "abstract": "With the availability of massive general-domain dialogue data, pre-trained dialogue generation appears to be super appealing to transfer knowledge from the general domain to downstream applications. In most existing work, such transferable ability is mainly obtained by fitting a large model with hundreds of millions of parameters on massive data in an exhaustive way, leading to inefficient running and poor interpretability. This paper proposes a novel dialogue generation model with a latent structure that is easily transferable from the general domain to downstream tasks in a lightweight and transparent way. Experiments on two benchmarks validate the effectiveness of the proposed model. Thanks to the transferable latent structure, our model is able to yield better dialogue responses than four strong baselines in terms of both automatic and human evaluations, and our model with about 22% parameters particularly delivers a 5x speedup in running time compared with the strongest baseline. 
Moreover, the proposed model is explainable by interpreting the discrete latent variables.", + "author": "Xueliang Zhao; Lemao Liu; Tingchen Fu; Shuming Shi; Dongyan Zhao; Rui Yan", + "authorids": "/x/xueliang-zhao/; /l/lemao-liu/; /t/tingchen-fu/; /s/shuming-shi/; /d/dongyan-zhao/; /r/rui-yan/", + "bibtex": "@inproceedings{zhao-etal-2022-towards,\n title = \"Towards Efficient Dialogue Pre-training with Transferable and Interpretable Latent Structure\",\n author = \"Zhao, Xueliang and\n Liu, Lemao and\n Fu, Tingchen and\n Shi, Shuming and\n Zhao, Dongyan and\n Yan, Rui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.683/\",\n doi = \"10.18653/v1/2022.emnlp-main.683\",\n pages = \"10051--10063\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.683.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.683/", + "pdf_size": 436162, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10552576166902357046&as_sdt=8005&sciodt=0,7&hl=en", + "gs_version_total": 3, + "aff": "Wangxuan Institute of Computer Technology, Peking University + Center for Data Science, AAIS, Peking University + Beijing Institute for General Artificial Intelligence; Tencent AI Lab; Gaoling School of Artificial Intelligence, Renmin University of China; Tencent AI Lab; Wangxuan Institute of Computer Technology, Peking University + Beijing Institute for General Artificial Intelligence; Gaoling School of Artificial Intelligence, Renmin University of China", + "aff_domain": "pku.edu.cn;tencent.com;gmail.com;tencent.com;pku.edu.cn;ruc.edu.cn", + "email": "pku.edu.cn;tencent.com;gmail.com;tencent.com;pku.edu.cn;ruc.edu.cn", + "github": "", + 
"project": "", + "author_num": 6, + "aff_unique_index": "0+0+1;2;3;2;0+1;3", + "aff_unique_norm": "Peking University;Beijing Institute for General Artificial Intelligence;Tencent;Renmin University of China", + "aff_unique_dep": "Wangxuan Institute of Computer Technology;;Tencent AI Lab;Gaoling School of Artificial Intelligence", + "aff_unique_url": "http://www.pku.edu.cn;http://www.bigaiai.org/;https://ai.tencent.com;http://www.ruc.edu.cn", + "aff_unique_abbr": "PKU;BIGAI;Tencent AI Lab;RUC", + "aff_campus_unique_index": "1;1;;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0+0;0;0;0;0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.126", + "title": "Towards Explaining Subjective Ground of Individuals on Social Media", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large-scale language models have been reducing the gap between machines and humans in understanding the real world, yet understanding an individual\u2019s theory of mind and behavior from text is far from being resolved. This research proposes a neural model\u2014Subjective Ground Attention\u2014that learns subjective grounds of individuals and accounts for their judgments on situations of others posted on social media. Using simple attention modules as well as taking one\u2019s previous activities into consideration, we empirically show that our model provides human-readable explanations of an individual\u2019s subjective preference in judging social situations. 
We further qualitatively evaluate the explanations generated by the model and claim that our model learns an individual\u2019s subjective orientation towards abstract moral concepts.", + "author": "Younghun Lee; Dan Goldwasser", + "authorids": "/y/younghun-lee/; /d/dan-goldwasser/", + "bibtex": "@inproceedings{lee-goldwasser-2022-towards,\n title = \"Towards Explaining Subjective Ground of Individuals on Social Media\",\n author = \"Lee, Younghun and\n Goldwasser, Dan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.126/\",\n doi = \"10.18653/v1/2022.findings-emnlp.126\",\n pages = \"1752--1766\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.126.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.126/", + "pdf_size": 2219319, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6959205731089204252&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Purdue University; Department of Computer Science, Purdue University", + "aff_domain": "purdue.edu;purdue.edu", + "email": "purdue.edu;purdue.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Purdue University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.purdue.edu", + "aff_unique_abbr": "Purdue", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.155", + "title": "Towards Generalizable and Robust Text-to-SQL Parsing", + "track": "main", + "status": "finding", + "award": false, + 
"abstract": "Text-to-SQL parsing tackles the problem of mapping natural language questions to executable SQL queries. In practice, text-to-SQL parsers often encounter various challenging scenarios, requiring them to be generalizable and robust. While most existing work addresses a particular generalization or robustness challenge, we aim to study it in a more comprehensive manner. In specific, we believe that text-to-SQL parsers should be (1) generalizable at three levels of generalization, namely i.i.d., zero-shot, and compositional, and (2) robust against input perturbations. To enhance these capabilities of the parser, we propose a novel TKK framework consisting of Task decomposition, Knowledge acquisition, and Knowledge composition to learn text-to-SQL parsing in stages. By dividing the learning process into multiple stages, our framework improves the parser\u2019s ability to acquire general SQL knowledge instead of capturing spurious patterns, making it more generalizable and robust. 
Experimental results under various generalization and robustness settings show that our framework is effective in all scenarios and achieves state-of-the-art performance on the Spider, SParC, and CoSQL datasets.", + "author": "Chang Gao; Bowen Li; Wenxuan Zhang; Wai Lam; Binhua Li; Fei Huang; Luo Si; Yongbin Li", + "authorids": "/c/chang-gao/; /b/bowen-li/; /w/wenxuan-zhang/; /w/wai-lam/; /b/binhua-li/; /f/fei-huang/; /l/luo-si/; /y/yongbin-li/", + "bibtex": "@inproceedings{gao-etal-2022-towards-generalizable,\n title = \"Towards Generalizable and Robust Text-to-{SQL} Parsing\",\n author = \"Gao, Chang and\n Li, Bowen and\n Zhang, Wenxuan and\n Lam, Wai and\n Li, Binhua and\n Huang, Fei and\n Si, Luo and\n Li, Yongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.155/\",\n doi = \"10.18653/v1/2022.findings-emnlp.155\",\n pages = \"2113--2125\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.155.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.155/", + "pdf_size": 351281, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16667729949027014147&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "The Chinese University of Hong Kong; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; The Chinese University of Hong Kong + DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group", + "aff_domain": "se.cuhk.edu.hk;gmail.com;alibaba-inc.com;se.cuhk.edu.hk;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": 
"se.cuhk.edu.hk;gmail.com;alibaba-inc.com;se.cuhk.edu.hk;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/tkk", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;1;0+1;1;1;1;1", + "aff_unique_norm": "The Chinese University of Hong Kong;Alibaba Group", + "aff_unique_dep": ";DAMO Academy", + "aff_unique_url": "https://www.cuhk.edu.hk;https://www.alibaba-group.com", + "aff_unique_abbr": "CUHK;Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0+0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.103", + "title": "Towards Generalized Open Information Extraction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Open Information Extraction (OpenIE) facilitates the open-domain discovery of textual facts. However, the prevailing solutions evaluate OpenIE models on in-domain test sets aside from the training corpus, which certainly violates the initial task principle of domain-independence. In this paper, we propose to advance OpenIE towards a more realistic scenario: generalizing over unseen target domains with different data distributions from the source training domains, termed Generalized OpenIE. For this purpose, we first introduce GLOBE, a large-scale human-annotated multi-domain OpenIE benchmark, to examine the robustness of recent OpenIE models to domain shifts, and the relative performance degradation of up to 70% implies the challenges of generalized OpenIE. Then, we propose DragonIE, which explores a minimalist expression of textual fact: directed acyclic graph, to improve the OpenIE generalization ability. 
Extensive experiments demonstrate that DragonIE beats the previous methods in both in-domain and out-of-domain settings by as much as 6.0% in F1 score absolutely, but there is still ample room for improvement.", + "author": "Bowen Yu; Zhenyu Zhang; Jingyang Li; Haiyang Yu; Tingwen Liu; Jian Sun; Yongbin Li; Bin Wang", + "authorids": "/b/bowen-yu/; /z/zhenyu-zhang/; /j/jingyang-li/; /h/haiyang-yu/; /t/tingwen-liu/; /j/jian-sun/; /y/yongbin-li/; /b/bin-wang/", + "bibtex": "@inproceedings{yu-etal-2022-towards,\n title = \"Towards Generalized Open Information Extraction\",\n author = \"Yu, Bowen and\n Zhang, Zhenyu and\n Li, Jingyang and\n Yu, Haiyang and\n Liu, Tingwen and\n Sun, Jian and\n Li, Yongbin and\n Wang, Bin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.103/\",\n doi = \"10.18653/v1/2022.findings-emnlp.103\",\n pages = \"1439--1453\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.103.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.103/", + "pdf_size": 476301, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13584775024268223267&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.262", + "title": "Towards Identifying Social Bias in Dialog Systems: Framework, Dataset, and Benchmark", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Among all the safety concerns that hinder the deployment of open-domain dialog systems (e.g., offensive languages, biases, and toxic behaviors), social bias presents an 
insidious challenge. Addressing this challenge requires rigorous analyses and normative reasoning. In this paper, we focus our investigation on social bias measurement to facilitate the development of unbiased dialog systems. We first propose a novel Dial-Bias Framework for analyzing the social bias in conversations using a holistic method beyond bias lexicons or dichotomous annotations. Leveraging the proposed framework, we further introduce the CDial-Bias Dataset which is, to the best of our knowledge, the first annotated Chinese social bias dialog dataset. We also establish a fine-grained dialog bias measurement benchmark and conduct in-depth ablation studies to shed light on the utility of the detailed annotations in the proposed dataset. Finally, we evaluate representative Chinese generative models with our classifiers to unveil the presence of social bias in these systems.", + "author": "Jingyan Zhou; Jiawen Deng; Fei Mi; Yitong Li; Yasheng Wang; Minlie Huang; Xin Jiang; Qun Liu; Helen Meng", + "authorids": "/j/jingyan-zhou/; /j/jiawen-deng/; /f/fei-mi/; /y/yitong-li/; /y/yasheng-wang/; /m/minlie-huang/; /x/xin-jiang/; /q/qun-liu/; /h/helen-meng/", + "bibtex": "@inproceedings{zhou-etal-2022-towards-identifying,\n title = \"Towards Identifying Social Bias in Dialog Systems: Framework, Dataset, and Benchmark\",\n author = \"Zhou, Jingyan and\n Deng, Jiawen and\n Mi, Fei and\n Li, Yitong and\n Wang, Yasheng and\n Huang, Minlie and\n Jiang, Xin and\n Liu, Qun and\n Meng, Helen\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.262/\",\n doi = \"10.18653/v1/2022.findings-emnlp.262\",\n pages = \"3576--3591\"\n}", + "pdf": 
"https://aclanthology.org/2022.findings-emnlp.262.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.262/", + "pdf_size": 992803, + "gs_citation": 30, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9071514080407378014&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;;", + "aff_domain": ";;;;;;;;", + "email": ";;;;;;;;", + "github": "https://github.com/para-zhou/CDial-Bias", + "project": "", + "author_num": 9 + }, + { + "id": "2022.findings-emnlp.208", + "title": "Towards Intelligent Clinically-Informed Language Analyses of People with Bipolar Disorder and Schizophrenia", + "track": "main", + "status": "finding", + "award": false, + "abstract": "NLP offers a myriad of opportunities to support mental health research. However, prior work has almost exclusively focused on social media data, for which diagnoses are difficult or impossible to validate. We present a first-of-its-kind dataset of manually transcribed interactions with people clinically diagnosed with bipolar disorder and schizophrenia, as well as healthy controls. Data was collected through validated clinical tasks and paired with diagnostic measures. We extract 100+ temporal, sentiment, psycholinguistic, emotion, and lexical features from the data and establish classification validity using a variety of models to study language differences between diagnostic groups. Our models achieve strong classification performance (maximum F1=0.93-0.96), and lead to the discovery of interesting associations between linguistic features and diagnostic class. 
It is our hope that this dataset will offer high value to clinical and NLP researchers, with potential for widespread broader impacts.", + "author": "Ankit Aich; Avery Quynh; Varsha Badal; Amy Pinkham; Philip Harvey; Colin Depp; Natalie Parde", + "authorids": "/a/ankit-aich/; /a/avery-quynh/; /v/varsha-badal/; /a/amy-pinkham/; /p/philip-harvey/; /c/colin-depp/; /n/natalie-parde/", + "bibtex": "@inproceedings{aich-etal-2022-towards,\n title = \"Towards Intelligent Clinically-Informed Language Analyses of People with Bipolar Disorder and Schizophrenia\",\n author = \"Aich, Ankit and\n Quynh, Avery and\n Badal, Varsha and\n Pinkham, Amy and\n Harvey, Philip and\n Depp, Colin and\n Parde, Natalie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.208/\",\n doi = \"10.18653/v1/2022.findings-emnlp.208\",\n pages = \"2871--2887\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.208.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.208/", + "pdf_size": 634313, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1403395882318099883&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Department of Computer Science, University of Illinois Chicago; Department of Psychiatry, University of California San Diego; Department of Psychiatry, University of California San Diego; School of Behavioral and Brain Sciences, The University of Texas at Dallas; University of Miami Miller School of Medicine; Department of Psychiatry, University of California San Diego; Department of Computer Science, University of Illinois Chicago", + "aff_domain": 
"uic.edu;health.ucsd.edu;health.ucsd.edu;utdallas.edu;miami.edu;health.ucsd.edu;uic.edu", + "email": "uic.edu;health.ucsd.edu;health.ucsd.edu;utdallas.edu;miami.edu;health.ucsd.edu;uic.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;1;2;3;1;0", + "aff_unique_norm": "University of Illinois Chicago;University of California San Diego;The University of Texas at Dallas;University of Miami", + "aff_unique_dep": "Department of Computer Science;Department of Psychiatry;School of Behavioral and Brain Sciences;Miller School of Medicine", + "aff_unique_url": "https://www.uic.edu;https://ucsd.edu;https://www.utdallas.edu;https://www.miami.edu", + "aff_unique_abbr": "UIC;UCSD;UT Dallas;UM", + "aff_campus_unique_index": "0;1;1;2;3;1;0", + "aff_campus_unique": "Chicago;San Diego;Dallas;Miami", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.297", + "title": "Towards Intention Understanding in Suicidal Risk Assessment with Natural Language Processing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recent applications of natural language processing techniques to suicidal ideation detection and risk assessment frame the detection or assessment task as a text classification problem. Recent advances have developed many models, especially deep learning models, to boost predictive performance.Though the performance (in terms of aggregated evaluation scores) is improving, this position paper urges that better intention understanding is required for reliable suicidal risk assessment with computational methods. 
This paper reflects the state of natural language processing applied to suicide-associated text classification tasks, differentiates suicidal risk assessment and intention understanding, and points out potential limitations of sentiment features and pretrained language models in suicidal intention understanding.Besides, it urges the necessity for sequential intention understanding and risk assessment, discusses some critical issues in evaluation such as uncertainty, and studies the lack of benchmarks.", + "author": "Shaoxiong Ji", + "authorids": "/s/shaoxiong-ji/", + "bibtex": "@inproceedings{ji-2022-towards,\n title = \"Towards Intention Understanding in Suicidal Risk Assessment with Natural Language Processing\",\n author = \"Ji, Shaoxiong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.297/\",\n doi = \"10.18653/v1/2022.findings-emnlp.297\",\n pages = \"4028--4038\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.297.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.297/", + "pdf_size": 292976, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3412683070521234250&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Aalto University", + "aff_domain": "aalto.fi", + "email": "aalto.fi", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "Aalto University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.aalto.fi", + "aff_unique_abbr": "Aalto", + "aff_country_unique_index": "0", + "aff_country_unique": "Finland" + }, + { + "id": "2022.emnlp-main.613", + "title": "Towards Inter-character Relationship-driven Story Generation", + 
"track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we introduce the task of modeling interpersonal relationships for story generation. For addressing this task, we propose Relationships as Latent Variables for Story Generation, (ReLiSt). ReLiSt generates stories sentence by sentence and has two major components - a relationship selector and a story continuer. The relationship selector specifies a latent variable to pick the relationship to exhibit in the next sentence and the story continuer generates the next sentence while expressing the selected relationship in a coherent way. Our automatic and human evaluations demonstrate that ReLiSt is able to generate stories with relationships that are more faithful to desired relationships while maintaining the content quality. The relationship assignments to sentences during inference brings interpretability to ReLiSt.", + "author": "Anvesh Rao Vijjini; Faeze Brahman; Snigdha Chaturvedi", + "authorids": "/a/anvesh-rao-vijjini/; /f/faeze-brahman/; /s/snigdha-chaturvedi/", + "bibtex": "@inproceedings{vijjini-etal-2022-towards,\n title = \"Towards Inter-character Relationship-driven Story Generation\",\n author = \"Vijjini, Anvesh Rao and\n Brahman, Faeze and\n Chaturvedi, Snigdha\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.613/\",\n doi = \"10.18653/v1/2022.emnlp-main.613\",\n pages = \"8970--8987\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.613.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.613/", + "pdf_size": 1002315, + "gs_citation": 2, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=10496182305749367342&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "UNC Chapel Hill; Allen Institute for Artificial Intelligence + Paul G. Allen School of Computer Science & Engineering, University of Washington; UNC Chapel Hill", + "aff_domain": "cs.unc.edu;allenai.org;cs.unc.edu", + "email": "cs.unc.edu;allenai.org;cs.unc.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "University of North Carolina at Chapel Hill;Allen Institute for Artificial Intelligence;University of Washington", + "aff_unique_dep": ";;Paul G. Allen School of Computer Science & Engineering", + "aff_unique_url": "https://www.unc.edu;https://allenai.org;https://www.washington.edu", + "aff_unique_abbr": "UNC;AI2;UW", + "aff_campus_unique_index": "0;2;0", + "aff_campus_unique": "Chapel Hill;;Seattle", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.316", + "title": "Towards Interactivity and Interpretability: A Rationale-based Legal Judgment Prediction Framework", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Legal judgment prediction (LJP) is a fundamental task in legal AI, which aims to assist the judge to hear the case and determine the judgment. The legal judgment usually consists of the law article, charge, and term of penalty. In the real trial scenario, the judge usually makes the decision step-by-step: first concludes the rationale according to the case\u2019s facts and then determines the judgment. Recently, many models have been proposed and made tremendous progress in LJP, but most of them adopt an end-to-end manner that cannot be manually intervened by the judge for practical use. Moreover, existing models lack interpretability due to the neglect of rationale in the prediction process. 
Following the judge\u2019s real trial logic, in this paper, we propose a novel Rationale-based Legal Judgment Prediction (RLJP) framework. In the RLJP framework, the LJP process is split into two steps. In the first phase, the model generates the rationales according to the fact description. Then it predicts the judgment based on the fact and the generated rationales. Extensive experiments on a real-world dataset show RLJP achieves the best results compared to the state-of-the-art models. Meanwhile, the proposed framework provides good interactivity and interpretability which enables practical use.", + "author": "Yiquan Wu; Yifei Liu; Weiming Lu; Yating Zhang; Jun Feng; Changlong Sun; Fei Wu; Kun Kuang", + "authorids": "/y/yiquan-wu/; /y/yifei-liu/; /w/weiming-lu/; /y/yating-zhang/; /j/jun-feng/; /c/changlong-sun/; /f/fei-wu/; /k/kun-kuang/", + "bibtex": "@inproceedings{wu-etal-2022-towards,\n title = \"Towards Interactivity and Interpretability: A Rationale-based Legal Judgment Prediction Framework\",\n author = \"Wu, Yiquan and\n Liu, Yifei and\n Lu, Weiming and\n Zhang, Yating and\n Feng, Jun and\n Sun, Changlong and\n Wu, Fei and\n Kuang, Kun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.316/\",\n doi = \"10.18653/v1/2022.emnlp-main.316\",\n pages = \"4787--4799\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.316.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.316/", + "pdf_size": 732885, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14095698308942745534&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Zhejiang University; Zhejiang University; Zhejiang 
University; Alibaba Group; State Grid Zhejiang Electric Power Co., LTD; Zhejiang University+Alibaba Group; Zhejiang University; Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;gmail.com;gmail.com;taobao.com;cs.zju.edu.cn;zju.edu.cn", + "email": "zju.edu.cn;zju.edu.cn;zju.edu.cn;gmail.com;gmail.com;taobao.com;cs.zju.edu.cn;zju.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;1;2;0+1;0;0", + "aff_unique_norm": "Zhejiang University;Alibaba Group;State Grid Zhejiang Electric Power Company", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.zju.edu.cn;https://www.alibaba.com;http://www.zj.sgcc.com.cn", + "aff_unique_abbr": "ZJU;Alibaba;SG Zhejiang", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0+0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.350", + "title": "Towards Knowledge-Intensive Text-to-SQL Semantic Parsing with Formulaic Knowledge", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we study the problem of knowledge-intensive text-to-SQL, in which domain knowledge is necessary to parse expert questions into SQL queries over domain-specific tables. We formalize this scenario by building a new benchmark KnowSQL consisting of domain-specific questions covering various domains. We then address this problem by representing formulaic knowledge rather than by annotating additional data examples. More concretely, we construct a formulaic knowledge bank as a domain knowledge base and propose a framework (ReGrouP) to leverage this formulaic knowledge during parsing. 
Experiments using ReGrouP demonstrate a significant 28.2% improvement overall on KnowSQL.", + "author": "Longxu Dou; Yan Gao; Xuqi Liu; Mingyang Pan; Dingzirui Wang; Wanxiang Che; Dechen Zhan; Min-Yen Kan; Jian-Guang Lou", + "authorids": "/l/longxu-dou/; /y/yan-gao/; /x/xuqi-liu/; /m/mingyang-pan/; /d/dingzirui-wang/; /w/wanxiang-che/; /d/dechen-zhan/; /m/min-yen-kan/; /j/jian-guang-lou/", + "bibtex": "@inproceedings{dou-etal-2022-towards,\n title = \"Towards Knowledge-Intensive Text-to-{SQL} Semantic Parsing with Formulaic Knowledge\",\n author = \"Dou, Longxu and\n Gao, Yan and\n Liu, Xuqi and\n Pan, Mingyang and\n Wang, Dingzirui and\n Che, Wanxiang and\n Zhan, Dechen and\n Kan, Min-Yen and\n Lou, Jian-Guang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.350/\",\n doi = \"10.18653/v1/2022.emnlp-main.350\",\n pages = \"5240--5253\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.350.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.350/", + "pdf_size": 887964, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2552784441167834049&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Harbin Institute of Technology; Microsoft Research Asia; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; Harbin Institute of Technology; National University of Singapore; Harbin Institute of Technology; Microsoft Research Asia", + "aff_domain": "ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;microsoft.com;microsoft.com;comp.nus.edu.sg", + "email": 
"ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;ir.hit.edu.cn;hit.edu.cn;microsoft.com;microsoft.com;comp.nus.edu.sg", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0;0;0;0;2;0;1", + "aff_unique_norm": "Harbin Institute of Technology;Microsoft Research;National University of Singapore", + "aff_unique_dep": ";Research;", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.microsoft.com/en-us/research/group/asia;https://www.nus.edu.sg", + "aff_unique_abbr": "HIT;MSR Asia;NUS", + "aff_campus_unique_index": "0;1;0;0;0;0;0;1", + "aff_campus_unique": "Harbin;Asia;", + "aff_country_unique_index": "0;0;0;0;0;0;1;0;0", + "aff_country_unique": "China;Singapore" + }, + { + "id": "2022.emnlp-main.333", + "title": "Towards Multi-Modal Sarcasm Detection via Hierarchical Congruity Modeling with Knowledge Enhancement", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Sarcasm is a linguistic phenomenon indicating a discrepancy between literal meanings and implied intentions. Due to its sophisticated nature, it is usually difficult to be detected from the text itself. As a result, multi-modal sarcasm detection has received more and more attention in both academia and industries. However, most existing techniques only modeled the atomic-level inconsistencies between the text input and its accompanying image, ignoring more complex compositions for both modalities. Moreover, they neglected the rich information contained in external knowledge, e.g., image captions. In this paper, we propose a novel hierarchical framework for sarcasm detection by exploring both the atomic-level congruity based on multi-head cross attentions and the composition-level congruity based on graph neural networks, where a post with low congruity can be identified as sarcasm. In addition, we exploit the effect of various knowledge resources for sarcasm detection. 
Evaluation results on a public multi-modal sarcasm detection dataset based on Twitter demonstrate the superiority of our proposed model.", + "author": "Hui Liu; Wenya Wang; Haoliang Li", + "authorids": "/h/hui-liu/; /w/wenya-wang/; /h/haoliang-li/", + "bibtex": "@inproceedings{liu-etal-2022-towards-multi-modal,\n title = \"Towards Multi-Modal Sarcasm Detection via Hierarchical Congruity Modeling with Knowledge Enhancement\",\n author = \"Liu, Hui and\n Wang, Wenya and\n Li, Haoliang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.333/\",\n doi = \"10.18653/v1/2022.emnlp-main.333\",\n pages = \"4995--5006\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.333.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.333/", + "pdf_size": 2435253, + "gs_citation": 93, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18119141194103137749&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "City University of Hong Kong; Nanyang Technological University + University of Washington; City University of Hong Kong", + "aff_domain": "my.cityu.edu.hk;ntu.edu.sg;cityu.edu.hk", + "email": "my.cityu.edu.hk;ntu.edu.sg;cityu.edu.hk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "City University of Hong Kong;Nanyang Technological University;University of Washington", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.cityu.edu.hk;https://www.ntu.edu.sg;https://www.washington.edu", + "aff_unique_abbr": "CityU;NTU;UW", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+2;0", + "aff_country_unique": 
"China;Singapore;United States" + }, + { + "id": "2022.emnlp-industry.11", + "title": "Towards Need-Based Spoken Language Understanding Model Updates: What Have We Learned?", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "In productionized machine learning systems, online model performance is known to deteriorate over time when there is a distributional drift between offline training and online application data. As a remedy, models are typically retrained at fixed time intervals, implying high computational and manual costs. This work aims at decreasing such costs in productionized, large-scale Spoken Language Understanding systems. In particular, we develop a need-based re-training strategy guided by an efficient drift detector and discuss the arising challenges including system complexity, overlapping model releases, observation limitation and the absence of annotated resources at runtime. We present empirical results on historical data and confirm the utility of our design decisions via an online A/B experiment.", + "author": "Quynh Do; Judith Gaspers; Daniil Sorokin; Patrick Lehnen", + "authorids": "/q/quynh-do/; /j/judith-gaspers/; /d/daniil-sorokin/; /p/patrick-lehnen/", + "bibtex": "@inproceedings{do-etal-2022-towards,\n title = \"Towards Need-Based Spoken Language Understanding Model Updates: What Have We Learned?\",\n author = \"Do, Quynh and\n Gaspers, Judith and\n Sorokin, Daniil and\n Lehnen, Patrick\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.11/\",\n doi = \"10.18653/v1/2022.emnlp-industry.11\",\n pages = \"121--127\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.11.pdf", + "site": 
"https://aclanthology.org/2022.emnlp-industry.11/", + "pdf_size": 260014, + "gs_citation": 0, + "gs_cited_by_link": "https://scholar.google.com/scholar?q=related:N8ca-NPu9FMJ:scholar.google.com/&scioq=Towards+Need-Based+Spoken+Language+Understanding+Model+Updates:+What+Have+We+Learned%3F&hl=en&as_sdt=0,33", + "gs_version_total": 2, + "aff": "Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "amazon.de;amazon.de;amazon.de;amazon.de", + "email": "amazon.de;amazon.de;amazon.de;amazon.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Alexa AI", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.599", + "title": "Towards Opening the Black Box of Neural Machine Translation: Source and Target Interpretations of the Transformer", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In Neural Machine Translation (NMT), each token prediction is conditioned on the source sentence and the target prefix (what has been previously translated at a decoding step). However, previous work on interpretability in NMT has mainly focused solely on source sentence tokens\u2019 attributions. Therefore, we lack a full understanding of the influences of every input token (source sentence and target prefix) in the model predictions. In this work, we propose an interpretability method that tracks input tokens\u2019 attributions for both contexts. Our method, which can be extended to any encoder-decoder Transformer-based model, allows us to better comprehend the inner workings of current NMT models. 
We apply the proposed method to both bilingual and multilingual Transformers and present insights into their behaviour.", + "author": "Javier Ferrando; Gerard I. G\u00e1llego; Belen Alastruey; Carlos Escolano; Marta R. Costa-juss\u00e0", + "authorids": "/j/javier-ferrando/; /g/gerard-i-gallego/; /b/belen-alastruey/; /c/carlos-escolano/; /m/marta-r-costa-jussa/", + "bibtex": "@inproceedings{ferrando-etal-2022-towards,\n title = \"Towards Opening the Black Box of Neural Machine Translation: Source and Target Interpretations of the Transformer\",\n author = \"Ferrando, Javier and\n G{\\'a}llego, Gerard I. and\n Alastruey, Belen and\n Escolano, Carlos and\n Costa-juss{\\`a}, Marta R.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.599/\",\n doi = \"10.18653/v1/2022.emnlp-main.599\",\n pages = \"8756--8769\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.599.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.599/", + "pdf_size": 2941393, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18301257932691229505&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; TALP Research Center, Universitat Polit\u00e8cnica de Catalunya; Meta AI", + "aff_domain": "upc.edu;upc.edu;upc.edu;upc.edu;meta.com", + "email": "upc.edu;upc.edu;upc.edu;upc.edu;meta.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Universitat Polit\u00e8cnica de 
Catalunya;Meta Platforms, Inc.", + "aff_unique_dep": "TALP Research Center;Meta AI", + "aff_unique_url": "https://www.upc.edu;https://meta.com", + "aff_unique_abbr": "UPC;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1", + "aff_country_unique": "Spain;United States" + }, + { + "id": "2022.emnlp-main.544", + "title": "Towards Pragmatic Production Strategies for Natural Language Generation Tasks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "This position paper proposes a conceptual framework for the design of Natural Language Generation (NLG) systems that follow efficient and effective production strategies in order to achieve complex communicative goals. In this general framework, efficiency is characterised as the parsimonious regulation of production and comprehension costs while effectiveness is measured with respect to task-oriented and contextually grounded communicative goals. We provide concrete suggestions for the estimation of goals, costs, and utility via modern statistical methods, demonstrating applications of our framework to the classic pragmatic task of visually grounded referential games and to abstractive text summarisation, two popular generation tasks with real-world applications. 
In sum, we advocate for the development of NLG systems that learn to make pragmatic production decisions from experience, by reasoning about goals, costs, and utility in a human-like way.", + "author": "Mario Giulianelli", + "authorids": "/m/mario-giulianelli/", + "bibtex": "@inproceedings{giulianelli-2022-towards,\n title = \"Towards Pragmatic Production Strategies for Natural Language Generation Tasks\",\n author = \"Giulianelli, Mario\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.544/\",\n doi = \"10.18653/v1/2022.emnlp-main.544\",\n pages = \"7978--7984\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.544.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.544/", + "pdf_size": 188623, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5096066090324969604&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "", + "aff_domain": "upc.edu;upc.edu;upc.edu;upc.edu;meta.com", + "email": "upc.edu;upc.edu;upc.edu;upc.edu;meta.com", + "github": "", + "project": "", + "author_num": 1 + }, + { + "id": "2022.findings-emnlp.29", + "title": "Towards Realistic Low-resource Relation Extraction: A Benchmark with Empirical Baseline Study", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This paper presents an empirical study to build relation extraction systems in low-resource settings. 
Based upon recent pre-trained language models, we comprehensively investigate three schemes to evaluate the performance in low-resource settings: (i) different types of prompt-based methods with few-shot labeled data; (ii) diverse balancing methods to address the long-tailed distribution issue; (iii) data augmentation technologies and self-training to generate more labeled in-domain data. We create a benchmark with 8 relation extraction (RE) datasets covering different languages, domains and contexts and perform extensive comparisons over the proposed schemes with combinations. Our experiments illustrate: (i) Though prompt-based tuning is beneficial in low-resource RE, there is still much potential for improvement, especially in extracting relations from cross-sentence contexts with multiple relational triples; (ii) Balancing methods are not always helpful for RE with long-tailed distribution; (iii) Data augmentation complements existing baselines and can bring much performance gain, while self-training may not consistently achieve advancement to low-resource RE. 
Code and datasets are in https://github.com/zjunlp/LREBench.", + "author": "Xin Xu; Xiang Chen; Ningyu Zhang; Xin Xie; Xi Chen; Huajun Chen", + "authorids": "/x/xin-xu/; /x/xiang-chen/; /n/ningyu-zhang/; /x/xin-xie/; /x/xi-chen/; /h/huajun-chen/", + "bibtex": "@inproceedings{xu-etal-2022-towards-realistic,\n title = \"Towards Realistic Low-resource Relation Extraction: A Benchmark with Empirical Baseline Study\",\n author = \"Xu, Xin and\n Chen, Xiang and\n Zhang, Ningyu and\n Xie, Xin and\n Chen, Xi and\n Chen, Huajun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.29/\",\n doi = \"10.18653/v1/2022.findings-emnlp.29\",\n pages = \"413--427\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.29.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.29/", + "pdf_size": 624959, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=486915805424885825&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 4, + "aff": "Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University; Tencent; Zhejiang University & AZFT Joint Lab for Knowledge Engine+Hangzhou Innovation Center, Zhejiang University", + "aff_domain": "zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;tencent.com;zju.edu.cn", + "email": 
"zju.edu.cn;zju.edu.cn;zju.edu.cn;zju.edu.cn;tencent.com;zju.edu.cn", + "github": "https://github.com/zjunlp/LREBench", + "project": "https://zjunlp.github.io/project/LREBench", + "author_num": 6, + "aff_unique_index": "0+0;0+0;0+0;0+0;1;0+0", + "aff_unique_norm": "Zhejiang University;Tencent Holdings Limited", + "aff_unique_dep": "Joint Lab for Knowledge Engine;", + "aff_unique_url": "http://www.zju.edu.cn;https://www.tencent.com", + "aff_unique_abbr": "ZJU;Tencent", + "aff_campus_unique_index": "1;1;1;1;1", + "aff_campus_unique": ";Hangzhou", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.242", + "title": "Towards Reinterpreting Neural Topic Models via Composite Activations", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most Neural Topic Models (NTM) use a variational auto-encoder framework producing K topics limited to the size of the encoder\u2019s output. These topics are interpreted through the selection of the top activated words via the weights or reconstructed vector of the decoder that are directly connected to each neuron. In this paper, we present a model-free two-stage process to reinterpret NTM and derive further insights on the state of the trained model. Firstly, building on the original information from a trained NTM, we generate a pool of potential candidate \u201ccomposite topics\u201d by exploiting possible co-occurrences within the original set of topics, which decouples the strict interpretation of topics from the original NTM. This is followed by a combinatorial formulation to select a final set of composite topics, which we evaluate for coherence and diversity on a large external corpus. 
Lastly, we employ a user study to derive further insights on the reinterpretation process.", + "author": "Jia Peng Lim; Hady Lauw", + "authorids": "/j/jia-peng-lim/; /h/hady-lauw/", + "bibtex": "@inproceedings{lim-lauw-2022-towards,\n title = \"Towards Reinterpreting Neural Topic Models via Composite Activations\",\n author = \"Lim, Jia Peng and\n Lauw, Hady\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.242/\",\n doi = \"10.18653/v1/2022.emnlp-main.242\",\n pages = \"3688--3703\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.242.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.242/", + "pdf_size": 623149, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7649751115717857805&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Singapore Management University; Singapore Management University", + "aff_domain": "smu.edu.sg;smu.edu.sg", + "email": "smu.edu.sg;smu.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Singapore Management University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.smu.edu.sg", + "aff_unique_abbr": "SMU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.findings-emnlp.445", + "title": "Towards Robust NLG Bias Evaluation with Syntactically-diverse Prompts", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We present a robust methodology for evaluating biases in natural language generation(NLG) systems. 
Previous works use fixed hand-crafted prefix templates with mentions of various demographic groups to prompt models to generate continuations for bias analysis. These fixed prefix templates could themselves be specific in terms of styles or linguistic structures, which may lead to unreliable fairness conclusions that are not representative of the general trends from tone varying prompts. To study this problem, we paraphrase the prompts with different syntactic structures and use these to evaluate demographic bias in NLG systems. Our results suggest similar overall bias trends but some syntactic structures lead to contradictory conclusions compared to past works. We show that our methodology is more robust and that some syntactic structures prompt more toxic content while others could prompt less biased generation. This suggests the importance of not relying on a fixed syntactic structure and using tone-invariant prompts. Introducing syntactically-diverse prompts can achieve more robust NLG (bias) evaluation.", + "author": "Arshiya Aggarwal; Jiao Sun; Nanyun Peng", + "authorids": "/a/arshiya-aggarwal/; /j/jiao-sun/; /n/nanyun-peng/", + "bibtex": "@inproceedings{aggarwal-etal-2022-towards,\n title = \"Towards Robust {NLG} Bias Evaluation with Syntactically-diverse Prompts\",\n author = \"Aggarwal, Arshiya and\n Sun, Jiao and\n Peng, Nanyun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.445/\",\n doi = \"10.18653/v1/2022.findings-emnlp.445\",\n pages = \"6022--6032\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.445.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.445/", + "pdf_size": 1817228, + "gs_citation": 10, + 
"gs_cited_by_link": "https://scholar.google.com/scholar?cites=13656374554313638016&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Columbia University; University of Southern California; University of California, Los Angeles", + "aff_domain": "columbia.edu;usc.edu;cs.ucla.edu", + "email": "columbia.edu;usc.edu;cs.ucla.edu", + "github": "https://github.com/arshiyaaggarwal/Robust-NLG-Bias-Eval", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;2", + "aff_unique_norm": "Columbia University;University of Southern California;University of California, Los Angeles", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.columbia.edu;https://www.usc.edu;https://www.ucla.edu", + "aff_unique_abbr": "Columbia;USC;UCLA", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.542", + "title": "Towards Robust Numerical Question Answering: Diagnosing Numerical Capabilities of NLP Systems", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Numerical Question Answering is the task of answering questions that require numerical capabilities. Previous works introduce general adversarial attacks to Numerical Question Answering, while not systematically exploring numerical capabilities specific to the topic. In this paper, we propose to conduct numerical capability diagnosis on a series of Numerical Question Answering systems and datasets. A series of numerical capabilities are highlighted, and corresponding dataset perturbations are designed. Empirical results indicate that existing systems are severely challenged by these perturbations. E.g., Graph2Tree experienced a 53.83% absolute accuracy drop against the \u201cExtra\u201d perturbation on ASDiv-a, and BART experienced 13.80% accuracy drop against the \u201cLanguage\u201d perturbation on the numerical subset of DROP. 
As a counteracting approach, we also investigate the effectiveness of applying perturbations as data augmentation to relieve systems\u2019 lack of robust numerical capabilities. With experiment analysis and empirical studies, it is demonstrated that Numerical Question Answering with robust numerical capabilities is still to a large extent an open question. We discuss future directions of Numerical Question Answering and summarize guidelines on future dataset collection and system design.", + "author": "Jialiang Xu; Mengyu Zhou; Xinyi He; Shi Han; Dongmei Zhang", + "authorids": "/j/jialiang-xu/; /m/mengyu-zhou/; /x/xinyi-he/; /s/shi-han/; /d/dongmei-zhang/", + "bibtex": "@inproceedings{xu-etal-2022-towards-robust,\n title = \"Towards Robust Numerical Question Answering: Diagnosing Numerical Capabilities of {NLP} Systems\",\n author = \"Xu, Jialiang and\n Zhou, Mengyu and\n He, Xinyi and\n Han, Shi and\n Zhang, Dongmei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.542/\",\n doi = \"10.18653/v1/2022.emnlp-main.542\",\n pages = \"7950--7966\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.542.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.542/", + "pdf_size": 548863, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9781108041761998489&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign+Microsoft Research Asia; Microsoft Research; Xi\u2019an Jiaotong University+Microsoft Research Asia; Microsoft Research; Microsoft Research", + "aff_domain": "illinois.edu;microsoft.com;stu.xjtu.edu.cn;microsoft.com;microsoft.com", + "email": 
"illinois.edu;microsoft.com;stu.xjtu.edu.cn;microsoft.com;microsoft.com", + "github": "https://github.com/microsoft/NumberDiagnosis7950", + "project": "", + "author_num": 5, + "aff_unique_index": "0+1;2;3+1;2;2", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Microsoft Research;Microsoft Corporation;Xi'an Jiaotong University", + "aff_unique_dep": ";Research;Microsoft Research;", + "aff_unique_url": "https://illinois.edu;https://www.microsoft.com/en-us/research/group/asia;https://www.microsoft.com/en-us/research;https://www.xjtu.edu.cn", + "aff_unique_abbr": "UIUC;MSR Asia;MSR;XJTU", + "aff_campus_unique_index": "0+1;1", + "aff_campus_unique": "Urbana-Champaign;Asia;", + "aff_country_unique_index": "0+1;0;1+1;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.findings-emnlp.495", + "title": "Towards Robust Visual Question Answering: Making the Most of Biased Samples via Contrastive Learning", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Models for Visual Question Answering (VQA) often rely on the spurious correlations, i.e., the language priors, that appear in the biased samples of training set, which make them brittle against the out-of-distribution (OOD) test data. Recent methods have achieved promising progress in overcoming this problem by reducing the impact of biased samples on model training. However, these models reveal a trade-off that the improvements on OOD data severely sacrifice the performance on the in-distribution (ID) data (which is dominated by the biased samples). Therefore, we propose a novel contrastive learning approach, MMBS, for building robust VQA models by Making the Most of Biased Samples. Specifically, we construct positive samples for contrastive learning by eliminating the information related to spurious correlation from the original training samples and explore several strategies to use the constructed positive samples for training. 
Instead of undermining the importance of biased samples in model training, our approach precisely exploits the biased samples for unbiased information that contributes to reasoning. The proposed method is compatible with various VQA backbones. We validate our contributions by achieving competitive performance on the OOD dataset VQA-CP v2 while preserving robust performance on the ID dataset VQA v2.", + "author": "Qingyi Si; Yuanxin Liu; Fandong Meng; Zheng Lin; Peng Fu; Yanan Cao; Weiping Wang; Jie Zhou", + "authorids": "/q/qingyi-si/; /y/yuanxin-liu/; /f/fandong-meng/; /z/zheng-lin/; /p/peng-fu/; /y/yanan-cao/; /w/weiping-wang/; /j/jie-zhou/", + "bibtex": "@inproceedings{si-etal-2022-towards,\n title = \"Towards Robust Visual Question Answering: Making the Most of Biased Samples via Contrastive Learning\",\n author = \"Si, Qingyi and\n Liu, Yuanxin and\n Meng, Fandong and\n Lin, Zheng and\n Fu, Peng and\n Cao, Yanan and\n Wang, Weiping and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.495/\",\n doi = \"10.18653/v1/2022.findings-emnlp.495\",\n pages = \"6650--6662\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.495.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.495/", + "pdf_size": 19146494, + "gs_citation": 33, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15087191019749082650&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/PhoebusSi/MMBS", + "project": "", + "author_num": 8 + }, + { + "id": "2022.emnlp-main.367", + "title": "Towards Robust k-Nearest-Neighbor Machine Translation", + 
"track": "main", + "status": "Main", + "award": false, + "abstract": "k-Nearest-Neighbor Machine Translation (kNN-MT) becomes an important research direction of NMT in recent years. Its main idea is to retrieve useful key-value pairs from an additional datastore to modify translations without updating the NMT model. However, the underlying retrieved noisy pairs will dramatically deteriorate the model performance. In this paper, we conduct a preliminary study and find that this problem results from not fully exploiting the prediction of the NMT model. To alleviate the impact of noise, we propose a confidence-enhanced kNN-MT model with robust training. Concretely, we introduce the NMT confidence to refine the modeling of two important components of kNN-MT: kNN distribution and the interpolation weight. Meanwhile we inject two types of perturbations into the retrieved pairs for robust training. Experimental results on four benchmark datasets demonstrate that our model not only achieves significant improvements over current kNN-MT models, but also exhibits better robustness. 
Our code is available at https://github.com/DeepLearnXMU/Robust-knn-mt.", + "author": "Hui Jiang; Ziyao Lu; Fandong Meng; Chulun Zhou; Jie Zhou; Degen Huang; Jinsong Su", + "authorids": "/h/hui-jiang/; /z/ziyao-lu/; /f/fandong-meng/; /c/chulun-zhou/; /j/jie-zhou/; /d/degen-huang/; /j/jinsong-su/", + "bibtex": "@inproceedings{jiang-etal-2022-towards,\n title = \"Towards Robust k-Nearest-Neighbor Machine Translation\",\n author = \"Jiang, Hui and\n Lu, Ziyao and\n Meng, Fandong and\n Zhou, Chulun and\n Zhou, Jie and\n Huang, Degen and\n Su, Jinsong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.367/\",\n doi = \"10.18653/v1/2022.emnlp-main.367\",\n pages = \"5468--5477\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.367.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.367/", + "pdf_size": 2832267, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14651016195338054208&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "School of Informatics, Xiamen University, China + Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Pattern Recognition Center, WeChat AI, Tencent Inc, China; Dalian University of Technology, China; School of Informatics, Xiamen University, China + Pengcheng Laboratory, China", + "aff_domain": "stu.xmu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;dlut.edu.cn;xmu.edu.cn", + "email": "stu.xmu.edu.cn;tencent.com;tencent.com;tencent.com;tencent.com;dlut.edu.cn;xmu.edu.cn", 
+ "github": "https://github.com/DeepLearnXMU/Robust-knn-mt", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;1;1;1;1;2;0+3", + "aff_unique_norm": "Xiamen University;Tencent Inc;Dalian University of Technology;Pengcheng Laboratory", + "aff_unique_dep": "School of Informatics;Pattern Recognition Center, WeChat AI;;", + "aff_unique_url": "https://www.xmu.edu.cn;https://www.tencent.com;http://www.dlut.edu.cn/;", + "aff_unique_abbr": "XMU;Tencent;DUT;", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.581", + "title": "Towards Summary Candidates Fusion", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Sequence-to-sequence deep neural models fine-tuned for abstractive summarization can achieve great performance on datasets with enough human annotations. Yet, it has been shown that they have not reached their full potential, with a wide gap between the top beam search output and the oracle beam. Recently, re-ranking methods have been proposed, to learn to select a better summary candidate. However, such methods are limited by the summary quality aspects captured by the first-stage candidates. To bypass this limitation, we propose a new paradigm in second-stage abstractive summarization called SummaFusion that fuses several summary candidates to produce a novel abstractive second-stage summary. Our method works well on several summarization datasets, improving both the ROUGE scores and qualitative properties of fused summaries. It is especially good when the candidates to fuse are worse, such as in the few-shot setup where we set a new state-of-the art. 
We will make our code and checkpoints available at https://github.com/ntunlp/SummaFusion/.", + "author": "Mathieu Ravaut; Shafiq Joty; Nancy Chen", + "authorids": "/m/mathieu-ravaut/; /s/shafiq-joty/; /n/nancy-chen/", + "bibtex": "@inproceedings{ravaut-etal-2022-towards,\n title = \"Towards Summary Candidates Fusion\",\n author = \"Ravaut, Mathieu and\n Joty, Shafiq and\n Chen, Nancy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.581/\",\n doi = \"10.18653/v1/2022.emnlp-main.581\",\n pages = \"8488--8504\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.581.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.581/", + "pdf_size": 2543565, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3451668908783389949&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 5, + "aff": "Nanyang Technological University, Singapore + Institute of Infocomm Research (I2R), Singapore; Nanyang Technological University, Singapore + Salesforce Research; Institute of Infocomm Research (I2R), Singapore", + "aff_domain": "e.ntu.edu.sg;ntu.edu.sg;i2r.a-star.edu.sg", + "email": "e.ntu.edu.sg;ntu.edu.sg;i2r.a-star.edu.sg", + "github": "https://github.com/ntunlp/SummaFusion/", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+2;1", + "aff_unique_norm": "Nanyang Technological University;Institute of Infocomm Research;Salesforce", + "aff_unique_dep": ";;Salesforce Research", + "aff_unique_url": "https://www.ntu.edu.sg;https://www.i2r.a-star.edu.sg;https://research.salesforce.com", + "aff_unique_abbr": "NTU;I2R;Salesforce", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + 
"aff_country_unique_index": "0+0;0+1;0", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "2022.emnlp-main.562", + "title": "Towards Table-to-Text Generation with Pretrained Language Model: A Table Structure Understanding and Text Deliberating Approach", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Although remarkable progress on the neural table-to-text methods has been made, the generalization issues hinder the applicability of these models due to the limited source tables. Large-scale pretrained language models sound like a promising solution to tackle such issues. However, how to effectively bridge the gap between the structured table and the text input by fully leveraging table information to fuel the pretrained model is still not well explored. Besides, another challenge of integrating the deliberation mechanism into the text-to-text pretrained model for solving the table-to-text task remains seldom studied. In this paper, to implement the table-to-text generation with pretrained language model, we propose a table structure understanding and text deliberating approach, namely TASD. To be specific, we devise a three-layered multi-head attention network to realize the table-structureaware text generation model with the help of the pretrained language model. Furthermore, a multi-pass decoder framework is adopted to enhance the capability of polishing generated text for table descriptions. 
The empirical studies, as well as human evaluation, on two public datasets, validate that our approach can generate faithful and fluent descriptive texts for different types of tables.", + "author": "Miao Chen; Xinjiang Lu; Tong Xu; Yanyan Li; Zhou Jingbo; Dejing Dou; Hui Xiong", + "authorids": "/m/miao-chen/; /x/xinjiang-lu/; /t/tong-xu/; /y/yanyan-li/; /z/zhou-jingbo/; /d/dejing-dou/; /h/hui-xiong/", + "bibtex": "@inproceedings{chen-etal-2022-towards-table,\n title = \"Towards Table-to-Text Generation with Pretrained Language Model: A Table Structure Understanding and Text Deliberating Approach\",\n author = \"Chen, Miao and\n Lu, Xinjiang and\n Xu, Tong and\n Li, Yanyan and\n Jingbo, Zhou and\n Dou, Dejing and\n Xiong, Hui\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.562/\",\n doi = \"10.18653/v1/2022.emnlp-main.562\",\n pages = \"8199--8210\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.562.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.562/", + "pdf_size": 3018568, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16391542469192417875&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Science and Technology of China; BIL, Baidu Research; University of Science and Technology of China; BIL, Baidu Research; BIL, Baidu Research; BIL, Baidu Research; Hong Kong University of Science and Technology (Guangzhou) + Guangzhou HKUST Fok Ying Tung Research Institute", + "aff_domain": "mail.ustc.edu.cn;baidu.com;baidu.com;baidu.com;ustc.edu.cn;baidu.com;ust.hk", + "email": "mail.ustc.edu.cn;baidu.com;baidu.com;baidu.com;ustc.edu.cn;baidu.com;ust.hk", + "github": 
"", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;1;1;1;2+2", + "aff_unique_norm": "University of Science and Technology of China;Baidu Research;Hong Kong University of Science and Technology", + "aff_unique_dep": ";Baidu Research;", + "aff_unique_url": "http://www.ustc.edu.cn;https://research.baidu.com;https://www.ust.hk", + "aff_unique_abbr": "USTC;Baidu;HKUST", + "aff_campus_unique_index": "1+1", + "aff_campus_unique": ";Guangzhou", + "aff_country_unique_index": "0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.644", + "title": "Towards Teachable Reasoning Systems: Using a Dynamic Memory of User Feedback for Continual System Improvement", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Our goal is a teachable reasoning system for question-answering (QA), where a user can interact with faithful answer explanations, and correct its errors so that the system improves over time. Our approach is to augment a QA model with a dynamic memory of user feedback, containing user-supplied corrections toerroneous model beliefs that users identify during interaction. Retrievals from memory are used as additional context for QA, to help avoid previous mistakes in similar new situations - a novel application of memory-based continuous learning. With simulated feedback, we find that our system (called TeachMe) continually improves with time, and without model retraining, requiring feedback on only 25% of training examples to reach within 1% of the upper-bound (feedback on all examples). Similarly, in experiments with real users, we observe a similar trend, with performance improving by over 15% on a hidden test set after teaching. 
This suggests new opportunities for using frozen language models in an interactive setting where users can inspect, debug, and correct the model\u2019s beliefs, leading to improved system\u2019s performance over time.", + "author": "Bhavana Dalvi Mishra; Oyvind Tafjord; Peter Clark", + "authorids": "/b/bhavana-dalvi/; /o/oyvind-tafjord/; /p/peter-clark/", + "bibtex": "@inproceedings{dalvi-mishra-etal-2022-towards,\n title = \"Towards Teachable Reasoning Systems: Using a Dynamic Memory of User Feedback for Continual System Improvement\",\n author = \"Dalvi Mishra, Bhavana and\n Tafjord, Oyvind and\n Clark, Peter\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.644/\",\n doi = \"10.18653/v1/2022.emnlp-main.644\",\n pages = \"9465--9480\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.644.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.644/", + "pdf_size": 1967893, + "gs_citation": 36, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4302033960784372612&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA", + "aff_domain": "allenai.org;allenai.org;allenai.org", + "email": "allenai.org;allenai.org;allenai.org", + "github": "", + "project": "https://allenai.org/data/teachme", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Allen Institute for AI", + "aff_unique_dep": "", + "aff_unique_url": "https://allenai.org", + "aff_unique_abbr": "AI2", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seattle", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.180", + "title": "Towards Tracing Knowledge in Language Models Back to the Training Data", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Language models (LMs) have been shown to memorize a great deal of factual knowledge contained in their training data. But when an LM generates an assertion, it is often difficult to determine where it learned this information and whether it is true. In this paper, we propose the problem of fact tracing: identifying which training examples taught an LM to generate a particular factual assertion. Prior work on training data attribution (TDA) may offer effective tools for identifying such examples, known as \u201cproponents\u201d. We present the first quantitative benchmark to evaluate this. We compare two popular families of TDA methods \u2014 gradient-based and embedding-based \u2014 and find that much headroom remains. For example, both methods have lower proponent-retrieval precision than an information retrieval baseline (BM25) that does not have access to the LM at all. 
We identify key challenges that may be necessary for further improvement such as overcoming the problem of gradient saturation, and also show how several nuanced implementation details of existing neural TDA methods can significantly improve overall fact tracing performance.", + "author": "Ekin Akyurek; Tolga Bolukbasi; Frederick Liu; Binbin Xiong; Ian Tenney; Jacob Andreas; Kelvin Guu", + "authorids": "/e/ekin-akyurek/; /t/tolga-bolukbasi/; /f/frederick-liu/; /b/binbin-xiong/; /i/ian-tenney/; /j/jacob-andreas/; /k/kelvin-guu/", + "bibtex": "@inproceedings{akyurek-etal-2022-towards,\n title = \"Towards Tracing Knowledge in Language Models Back to the Training Data\",\n author = \"Akyurek, Ekin and\n Bolukbasi, Tolga and\n Liu, Frederick and\n Xiong, Binbin and\n Tenney, Ian and\n Andreas, Jacob and\n Guu, Kelvin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.180/\",\n doi = \"10.18653/v1/2022.findings-emnlp.180\",\n pages = \"2429--2446\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.180.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.180/", + "pdf_size": 1142313, + "gs_citation": 85, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12651843597889548436&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "Google Research\u2020; MIT CSAIL\u2020; Google Research; Google Research; Google Research; MIT CSAIL\u2020; Google Research", + "aff_domain": "mit.edu; ; ; ; ; ; ", + "email": "mit.edu; ; ; ; ; ; ", + "github": "https://github.com/ekinakyurek/influence", + "project": "https://huggingface.co/datasets/ekinakyurek/ftrace", + "author_num": 7, + "aff_unique_index": "0;1;0;0;0;1;0", + "aff_unique_norm": 
"Google;Massachusetts Institute of Technology", + "aff_unique_dep": "Google Research;Computer Science and Artificial Intelligence Laboratory", + "aff_unique_url": "https://research.google;http://www.csail.mit.edu", + "aff_unique_abbr": "Google Research;MIT CSAIL", + "aff_campus_unique_index": "0;1;0;0;0;1;0", + "aff_campus_unique": "Mountain View;Cambridge", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.37", + "title": "Towards Unified Prompt Tuning for Few-shot Text Classification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Prompt-based fine-tuning has boosted the performance of Pre-trained Language Models (PLMs) on few-shot text classification by employing task-specific prompts. Yet, PLMs are unfamiliar with prompt-style expressions during pre-training, which limits the few-shot learning performance on downstream tasks.It would be desirable if the models can acquire some prompting knowledge before adapting to specific NLP tasks. We present the Unified Prompt Tuning (UPT) framework, leading to better few-shot text classification for BERT-style models by explicitly capturing prompting semantics from non-target NLP datasets. In UPT, a novel paradigm Prompt-Options-Verbalizer is proposed for joint prompt learning across different NLP tasks, forcing PLMs to capture task-invariant prompting knowledge. We further design a self-supervised task named Knowledge-enhanced Selective Masked Language Modeling to improve the PLM\u2019s generalization abilities for accurate adaptation to previously unseen tasks. After multi-task learning across multiple tasks, the PLM can be better prompt-tuned towards any dissimilar target tasks in low-resourced settings. 
Experiments over a variety of NLP tasks show that UPT consistently outperforms state-of-the-arts for prompt-based fine-tuning.", + "author": "Jianing Wang; Chengyu Wang; Fuli Luo; Chuanqi Tan; Minghui Qiu; Fei Yang; Qiuhui Shi; Songfang Huang; Ming Gao", + "authorids": "/j/jianing-wang/; /c/chengyu-wang/; /f/fuli-luo/; /c/chuanqi-tan/; /m/minghui-qiu/; /f/fei-yang/; /q/qiuhui-shi/; /s/songfang-huang/; /m/ming-gao/", + "bibtex": "@inproceedings{wang-etal-2022-towards-unified,\n title = \"Towards Unified Prompt Tuning for Few-shot Text Classification\",\n author = \"Wang, Jianing and\n Wang, Chengyu and\n Luo, Fuli and\n Tan, Chuanqi and\n Qiu, Minghui and\n Yang, Fei and\n Shi, Qiuhui and\n Huang, Songfang and\n Gao, Ming\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.37/\",\n doi = \"10.18653/v1/2022.findings-emnlp.37\",\n pages = \"524--536\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.37.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.37/", + "pdf_size": 561728, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12993530826734366970&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Data Science and Engineering, East China Normal University, Shanghai, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Alibaba Group, Hangzhou, China; Zhejiang Lab, Hangzhou, China; Ant Group, Hangzhou, China; Alibaba Group, Hangzhou, China; School of Data Science and Engineering, East China Normal University, Shanghai, China + Shanghai Key Laboratory of Mental Health and Psychological Crisis Intervention, School of Psychology and 
Cognitive Science, East China Normal University, Shanghai, China", + "aff_domain": "gmail.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;zhejianglab.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn", + "email": "gmail.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;zhejianglab.com;antgroup.com;alibaba-inc.com;dase.ecnu.edu.cn", + "github": "https://github.com/alibaba/EasyNLP", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;1;2;3;1;0+0", + "aff_unique_norm": "East China Normal University;Alibaba Group;Zhejiang Lab;Ant Group", + "aff_unique_dep": "School of Data Science and Engineering;;;", + "aff_unique_url": "http://www.ecnu.edu.cn;https://www.alibaba.com;http://www.zhejianglab.com;https://www.antgroup.com", + "aff_unique_abbr": "ECNU;Alibaba;;Ant Group", + "aff_campus_unique_index": "0;1;1;1;1;1;1;1;0+0", + "aff_campus_unique": "Shanghai;Hangzhou", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.442", + "title": "Towards Unifying Reference Expression Generation and Comprehension", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Reference Expression Generation (REG) and Comprehension (REC) are two highly correlated tasks. Modeling REG and REC simultaneously for utilizing the relation between them is a promising way to improve both. However, the problem of distinct inputs, as well as building connections between them in a single model, brings challenges to the design and training of the joint model. To address the problems, we propose a unified model for REG and REC, named UniRef. It unifies these two tasks with the carefully-designed Image-Region-Text Fusion layer (IRTF), which fuses the image, region and text via the image cross-attention and region cross-attention. 
Additionally, IRTF could generate pseudo input regions for the REC task to enable a uniform way for sharing the identical representation space across the REC and REG. We further propose Vision-conditioned Masked Language Modeling (VMLM) and Text-Conditioned Region Prediction (TRP) to pre-train UniRef model on multi-granular corpora. The VMLM and TRP are directly related to REG and REC, respectively, but could help each other. We conduct extensive experiments on three benchmark datasets, RefCOCO, RefCOCO+ and RefCOCOg. Experimental results show that our model outperforms previous state-of-the-art methods on both REG and REC.", + "author": "Duo Zheng; Tao Kong; Ya Jing; Jiaan Wang; Xiaojie Wang", + "authorids": "/d/duo-zheng/; /t/tao-kong/; /y/ya-jing/; /j/jiaan-wang/; /x/xiaojie-wang/", + "bibtex": "@inproceedings{zheng-etal-2022-towards-unifying,\n title = \"Towards Unifying Reference Expression Generation and Comprehension\",\n author = \"Zheng, Duo and\n Kong, Tao and\n Jing, Ya and\n Wang, Jiaan and\n Wang, Xiaojie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.442/\",\n doi = \"10.18653/v1/2022.emnlp-main.442\",\n pages = \"6598--6611\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.442.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.442/", + "pdf_size": 7481614, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9204011941520399869&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.131", + "title": "Towards a Unified Multi-Dimensional 
Evaluator for Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multi-dimensional evaluation is the dominant paradigm for human evaluation in Natural Language Generation (NLG), i.e., evaluating the generated text from multiple explainable dimensions, such as coherence and fluency. However, automatic evaluation in NLG is still dominated by similarity-based metrics, and we lack a reliable framework for a more comprehensive evaluation of advanced models. In this paper, we propose a unified multi-dimensional evaluator UniEval for NLG. We re-frame NLG evaluation as a Boolean Question Answering (QA) task, and by guiding the model with different questions, we can use one evaluator to evaluate from multiple dimensions. Furthermore, thanks to the unified Boolean QA format, we are able to introduce an intermediate learning phase that enables UniEval to incorporate external knowledge from multiple related tasks and gain further improvement. Experiments on three typical NLG tasks show that UniEval correlates substantially better with human judgments than existing metrics. Specifically, compared to the top-performing unified evaluators, UniEval achieves a 23% higher correlation on text summarization, and over 43% on dialogue response generation. Also, UniEval demonstrates a strong zero-shot learning ability for unseen evaluation dimensions and tasks. 
Source code, data, and all pre-trained evaluators are available at https://github.com/maszhongming/UniEval.", + "author": "Ming Zhong; Yang Liu; Da Yin; Yuning Mao; Yizhu Jiao; Pengfei Liu; Chenguang Zhu; Heng Ji; Jiawei Han", + "authorids": "/m/ming-zhong/; /y/yang-liu/; /d/da-yin/; /y/yuning-mao/; /y/yizhu-jiao/; /p/pengfei-liu/; /c/chenguang-zhu/; /h/heng-ji/; /j/jiawei-han/", + "bibtex": "@inproceedings{zhong-etal-2022-towards,\n title = \"Towards a Unified Multi-Dimensional Evaluator for Text Generation\",\n author = \"Zhong, Ming and\n Liu, Yang and\n Yin, Da and\n Mao, Yuning and\n Jiao, Yizhu and\n Liu, Pengfei and\n Zhu, Chenguang and\n Ji, Heng and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.131/\",\n doi = \"10.18653/v1/2022.emnlp-main.131\",\n pages = \"2023--2038\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.131.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.131/", + "pdf_size": 776595, + "gs_citation": 242, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11665992617413981789&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 6, + "aff": "University of Illinois at Urbana-Champaign; Microsoft Cognitive Services Research; University of California, Los Angeles; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; Carnegie Mellon University; Microsoft Cognitive Services Research; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign", + "aff_domain": "illinois.edu;microsoft.com;cs.ucla.edu;illinois.edu;illinois.edu;cs.cmu.edu;microsoft.com;illinois.edu;illinois.edu", + "email": 
"illinois.edu;microsoft.com;cs.ucla.edu;illinois.edu;illinois.edu;cs.cmu.edu;microsoft.com;illinois.edu;illinois.edu", + "github": "https://github.com/maszhongming/UniEval", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;2;0;0;3;1;0;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Microsoft;University of California, Los Angeles;Carnegie Mellon University", + "aff_unique_dep": ";Cognitive Services Research;;", + "aff_unique_url": "https://illinois.edu;https://www.microsoft.com;https://www.ucla.edu;https://www.cmu.edu", + "aff_unique_abbr": "UIUC;Microsoft;UCLA;CMU", + "aff_campus_unique_index": "0;2;0;0;0;0", + "aff_campus_unique": "Urbana-Champaign;;Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.738", + "title": "Towards relation extraction from speech", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Relation extraction typically aims to extract semantic relationships between entities from the unstructured text.One of the most essential data sources for relation extraction is the spoken language, such as interviews and dialogues.However, the error propagation introduced in automatic speech recognition (ASR) has been ignored in relation extraction, and the end-to-end speech-based relation extraction method has been rarely explored.In this paper, we propose a new listening information extraction task, i.e., speech relation extraction.We construct the training dataset for speech relation extraction via text-to-speech systems, and we construct the testing dataset via crowd-sourcing with native English speakers.We explore speech relation extraction via two approaches: the pipeline approach conducting text-based extraction with a pretrained ASR module, and the end2end approach via a new proposed encoder-decoder model, or what we called SpeechRE.We conduct comprehensive experiments to distinguish the challenges in speech 
relation extraction, which may shed light on future explorations. We share the code and data on https://github.com/wutong8023/SpeechRE.", + "author": "Tongtong Wu; Guitao Wang; Jinming Zhao; Zhaoran Liu; Guilin Qi; Yuan-Fang Li; Gholamreza Haffari", + "authorids": "/t/tongtong-wu/; /g/guitao-wang/; /j/jinming-zhao/; /z/zhaoran-liu/; /g/guilin-qi/; /y/yuan-fang-li/; /g/gholamreza-haffari/", + "bibtex": "@inproceedings{wu-etal-2022-towards-relation,\n title = \"Towards relation extraction from speech\",\n author = \"Wu, Tongtong and\n Wang, Guitao and\n Zhao, Jinming and\n Liu, Zhaoran and\n Qi, Guilin and\n Li, Yuan-Fang and\n Haffari, Gholamreza\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.738/\",\n doi = \"10.18653/v1/2022.emnlp-main.738\",\n pages = \"10751--10762\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.738.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.738/", + "pdf_size": 1266224, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14395944860142857879&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Southeast University, China; Southeast University, China; Southeast University, China; Zhejiang University, China; Southeast University, China; Monash University, Australia; Monash University, Australia", + "aff_domain": "seu.edu.cn;seu.edu.cn;seu.edu.cn;gmail.com;seu.edu.cn;monash.edu;monash.edu", + "email": "seu.edu.cn;seu.edu.cn;seu.edu.cn;gmail.com;seu.edu.cn;monash.edu;monash.edu", + "github": "https://github.com/wutong8023/SpeechRE", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;1;0;2;2", + "aff_unique_norm": "Southeast 
University;Zhejiang University;Monash University", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.seu.edu.cn/;http://www.zju.edu.cn;https://www.monash.edu", + "aff_unique_abbr": "SEU;ZJU;Monash", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;1;1", + "aff_country_unique": "China;Australia" + }, + { + "id": "2022.emnlp-main.84", + "title": "Tracing Semantic Variation in Slang", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The meaning of a slang term can vary in different communities. However, slang semantic variation is not well understood and under-explored in the natural language processing of slang. One existing view argues that slang semantic variation is driven by culture-dependent communicative needs. An alternative view focuses on slang\u2019s social functions suggesting that the desire to foster semantic distinction may have led to the historical emergence of community-specific slang senses. We explore these theories using computational models and test them against historical slang dictionary entries, with a focus on characterizing regularity in the geographical variation of slang usages attested in the US and the UK over the past two centuries. We show that our models are able to predict the regional identity of emerging slang word meanings from historical slang records. We offer empirical evidence that both communicative need and semantic distinction play a role in the variation of slang meaning yet their relative importance fluctuates over the course of history. 
Our work offers an opportunity for incorporating historical cultural elements into the natural language processing of slang.", + "author": "Zhewei Sun; Yang Xu", + "authorids": "/z/zhewei-sun/; /y/yang-xu/", + "bibtex": "@inproceedings{sun-xu-2022-tracing,\n title = \"Tracing Semantic Variation in Slang\",\n author = \"Sun, Zhewei and\n Xu, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.84/\",\n doi = \"10.18653/v1/2022.emnlp-main.84\",\n pages = \"1299--1313\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.84.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.84/", + "pdf_size": 387610, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7456575494215293400&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "Department of Computer Science, University of Toronto, Toronto, Canada + Cognitive Science Program, University of Toronto, Toronto, Canada; Department of Computer Science, University of Toronto, Toronto, Canada + Cognitive Science Program, University of Toronto, Toronto, Canada", + "aff_domain": "cs.toronto.edu;cs.toronto.edu", + "email": "cs.toronto.edu;cs.toronto.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "University of Toronto", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utoronto.ca", + "aff_unique_abbr": "U of T", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Toronto", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.emnlp-industry.24", + "title": "Tractable & Coherent Multi-Document 
Summarization: Discrete Optimization of Multiple Neural Modeling Streams via Integer Linear Programming", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "One key challenge in multi-document summarization is the generated summary is often less coherent compared to single document summarization due to the larger heterogeneity of the input source content. In this work, we propose a generic framework to jointly consider coherence and informativeness in multi-document summarization and offers provisions to replace individual components based on the domain of source text. In particular, the framework characterizes coherence through verb transitions and entity mentions and takes advantage of syntactic parse trees and neural modeling for intra-sentential noise pruning. The framework cast the entire problem as an integer linear programming optimization problem with neural and non-neural models as linear components. We evaluate our method in the news and legal domains. 
The proposed approach consistently performs better than competitive baselines for both objective metrics and human evaluation.", + "author": "Litton J Kurisinkel; Nancy Chen", + "authorids": "/l/litton-j-kurisinkel/; /n/nancy-chen/", + "bibtex": "@inproceedings{j-kurisinkel-chen-2022-tractable,\n title = \"Tractable {\\&} Coherent Multi-Document Summarization: Discrete Optimization of Multiple Neural Modeling Streams via Integer Linear Programming\",\n author = \"J Kurisinkel, Litton and\n Chen, Nancy\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.24/\",\n doi = \"10.18653/v1/2022.emnlp-industry.24\",\n pages = \"237--243\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.24.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.24/", + "pdf_size": 396960, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14011070402550556136&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Institute for Infocomm Research, A*STAR, Singapore; Institute for Infocomm Research, A*STAR, Singapore", + "aff_domain": "i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "email": "i2r.a-star.edu.sg;i2r.a-star.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Institute for Infocomm Research", + "aff_unique_dep": "", + "aff_unique_url": "https://www.i2r.a-star.edu.sg", + "aff_unique_abbr": "I2R", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "Singapore" + }, + { + "id": "2022.findings-emnlp.361", + "title": "Train Flat, Then Compress: Sharpness-Aware Minimization Learns More Compressible 
Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Model compression by way of parameter pruning, quantization, or distillation has recently gained popularity as an approach for reducing the computational requirements of modern deep neural network models for NLP. Inspired by prior works suggesting a connection between simpler, more generalizable models and those that lie within wider loss basins, we hypothesize that optimizing for flat minima should lead to simpler parameterizations and thus more compressible models. We propose to combine sharpness-aware minimization (SAM) with various task-specific model compression methods, including iterative magnitude pruning (IMP), structured pruning with a distillation objective, and post-training dynamic quantization. Empirically, we show that optimizing for flatter minima consistently leads to greater compressibility of parameters compared to vanilla Adam when fine-tuning BERT models, with little to no loss in accuracy on the GLUE text classification and SQuAD question answering benchmarks. 
Moreover, SAM finds superior winning tickets during IMP that 1) are amenable to vanilla Adam optimization, and 2) transfer more effectively across tasks.", + "author": "Clara Na; Sanket Vaibhav Mehta; Emma Strubell", + "authorids": "/c/clara-na/; /s/sanket-vaibhav-mehta/; /e/emma-strubell/", + "bibtex": "@inproceedings{na-etal-2022-train,\n title = \"Train Flat, Then Compress: Sharpness-Aware Minimization Learns More Compressible Models\",\n author = \"Na, Clara and\n Mehta, Sanket Vaibhav and\n Strubell, Emma\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.361/\",\n doi = \"10.18653/v1/2022.findings-emnlp.361\",\n pages = \"4909--4936\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.361.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.361/", + "pdf_size": 1483797, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5670863815877729400&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/clarana/train-flat-compress", + "project": "", + "author_num": 3 + }, + { + "id": "2022.emnlp-main.167", + "title": "Training Dynamics for Curriculum Learning: A Study on Monolingual and Cross-lingual NLU", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Curriculum Learning (CL) is a technique of training models via ranking examples in a typically increasing difficulty trend with the aim of accelerating convergence and improving generalisability. 
Current approaches for Natural Language Understanding (NLU) tasks use CL to improve in-distribution data performance often via heuristic-oriented or task-agnostic difficulties. In this work, instead, we employ CL for NLU by taking advantage of training dynamics as difficulty metrics, i.e., statistics that measure the behavior of the model at hand on specific task-data instances during training and propose modifications of existing CL schedulers based on these statistics. Differently from existing works, we focus on evaluating models on in-distribution (ID), out-of-distribution (OOD) as well as zero-shot (ZS) cross-lingual transfer datasets. We show across several NLU tasks that CL with training dynamics can result in better performance mostly on zero-shot cross-lingual transfer and OOD settings with improvements up by 8.5% in certain cases. Overall, experiments indicate that training dynamics can lead to better performing models with smoother training compared to other difficulty metrics while being 20% faster on average. 
In addition, through analysis we shed light on the correlations of task-specific versus task-agnostic metrics.", + "author": "Fenia Christopoulou; Gerasimos Lampouras; Ignacio Iacobacci", + "authorids": "/f/fenia-christopoulou/; /g/gerasimos-lampouras/; /i/ignacio-iacobacci/", + "bibtex": "@inproceedings{christopoulou-etal-2022-training,\n title = \"Training Dynamics for Curriculum Learning: A Study on Monolingual and Cross-lingual {NLU}\",\n author = \"Christopoulou, Fenia and\n Lampouras, Gerasimos and\n Iacobacci, Ignacio\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.167/\",\n doi = \"10.18653/v1/2022.emnlp-main.167\",\n pages = \"2595--2611\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.167.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.167/", + "pdf_size": 2861456, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12417637203378857387&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 3, + "aff": "Huawei Noah\u2019s Ark Lab, London, UK; Huawei Noah\u2019s Ark Lab, London, UK; Huawei Noah\u2019s Ark Lab, London, UK", + "aff_domain": "huawei.com;huawei.com;huawei.com", + "email": "huawei.com;huawei.com;huawei.com", + "github": "https://github.com/huawei-noah/noah-research/tree/master/NLP/TD4CL", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Huawei Noah\u2019s Ark Lab", + "aff_unique_dep": "", + "aff_unique_url": "https://www.huawei.com/en/ai", + "aff_unique_abbr": "HNA Lab", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "London", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + 
"id": "2022.emnlp-main.382", + "title": "Training Language Models with Memory Augmentation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent work has improved language models (LMs) remarkably by equipping them with a non-parametric memory component. However, most existing approaches only introduce mem-ories at testing time or represent them using a separately trained encoder, resulting in suboptimal training of the language model. In this work, we present TRIME, a novel yet simple training approach designed for training LMs with memory augmentation. Our approach uses a training objective that directly takes in-batch examples as accessible memory. We also present new methods for memory construction and data batching, which are used for adapting to different sets of memories\u2014local, long-term, and external memory\u2014at testing time. We evaluate TRIME on multiple language modeling and machine translation benchmarks and show that it is able to achieve significant improvements across all the settings. Concretely, TRIME reduces the perplexity from 18.70 to 15.37 on WIKITEXT-103, by effectively leveraging a large memory set from the training corpus. 
Compared to standard LM training, TRIME adds negligible computational overhead and is compatible with different neural architectures, making it a versatile solution for training memory-augmented LMs.", + "author": "Zexuan Zhong; Tao Lei; Danqi Chen", + "authorids": "/z/zexuan-zhong/; /t/tao-lei/; /d/danqi-chen/", + "bibtex": "@inproceedings{zhong-etal-2022-training,\n title = \"Training Language Models with Memory Augmentation\",\n author = \"Zhong, Zexuan and\n Lei, Tao and\n Chen, Danqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.382/\",\n doi = \"10.18653/v1/2022.emnlp-main.382\",\n pages = \"5657--5673\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.382.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.382/", + "pdf_size": 395025, + "gs_citation": 140, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11175671961658363537&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/princeton-nlp/TRIME", + "project": "", + "author_num": 3 + }, + { + "id": "2022.findings-emnlp.86", + "title": "TranS: Transition-based Knowledge Graph Embedding with Synthetic Relation Representation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Knowledge graph embedding (KGE) aims to learn continuous vector representations of relations and entities in knowledge graph (KG). Recently, transition-based KGE methods have become popular and achieved promising performance. However, scoring patterns like TransE are not suitable for complex scenarios where the same entity pair has different relations. 
Although some models attempt to employ entity-relation interaction or projection to improve entity representation for one-to-many/many-to-one/many-to-many complex relations, they still continue the traditional scoring pattern, where only a single relation vector in the relation part is used to translate the head entity to the tail entity or their variants. And recent research shows that entity representation only needs to consider entities and their interactions to achieve better performance. Thus, in this paper, we propose a novel transition-based method, TranS, for KGE. The single relation vector of the relation part in the traditional scoring pattern is replaced by the synthetic relation representation with entity-relation interactions to solve these issues. And the entity part still retains its independence through entity-entity interactions. Experiments on a large KG dataset, ogbl-wikikg2, show that our model achieves state-of-the-art results.", + "author": "Xuanyu Zhang; Qing Yang; Dongliang Xu", + "authorids": "/x/xuanyu-zhang/; /q/qing-yang/; /d/dongliang-xu/", + "bibtex": "@inproceedings{zhang-etal-2022-trans,\n title = \"{T}ran{S}: Transition-based Knowledge Graph Embedding with Synthetic Relation Representation\",\n author = \"Zhang, Xuanyu and\n Yang, Qing and\n Xu, Dongliang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.86/\",\n doi = \"10.18653/v1/2022.findings-emnlp.86\",\n pages = \"1202--1208\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.86.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.86/", + "pdf_size": 428293, + "gs_citation": 19, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=4472547016728325231&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 3, + "aff": "Du Xiaoman Financial; Du Xiaoman Financial; Du Xiaoman Financial", + "aff_domain": "duxiaoman.com;duxiaoman.com;duxiaoman.com", + "email": "duxiaoman.com;duxiaoman.com;duxiaoman.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Du Xiaoman Financial", + "aff_unique_dep": "", + "aff_unique_url": "https://www.duxiaoman.com", + "aff_unique_abbr": "DXF", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.583", + "title": "TranSHER: Translating Knowledge Graph Embedding with Hyper-Ellipsoidal Restriction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge graph embedding methods are important for the knowledge graph completion (or link prediction) task.One state-of-the-art method, PairRE, leverages two separate vectors to model complex relations (i.e., 1-to-N, N-to-1, and N-to-N) in knowledge graphs. However, such a method strictly restricts entities on the hyper-ellipsoid surfaces which limits the optimization of entity distribution, leading to suboptimal performance of knowledge graph completion. To address this issue, we propose a novel score function TranSHER, which leverages relation-specific translations between head and tail entities to relax the constraint of hyper-ellipsoid restrictions. By introducing an intuitive and simple relation-specific translation, TranSHER can provide more direct guidance on optimization and capture more semantic characteristics of entities with complex relations. Experimental results show that TranSHER achieves state-of-the-art performance on link prediction and generalizes well to datasets in different domains and scales. 
Our codes are public available athttps://github.com/yizhilll/TranSHER.", + "author": "Yizhi Li; Wei Fan; Chao Liu; Chenghua Lin; Jiang Qian", + "authorids": "/y/yizhi-li/; /w/wei-fan/; /c/chao-liu/; /c/chenghua-lin/; /j/jiang-qian/", + "bibtex": "@inproceedings{li-etal-2022-transher,\n title = \"{T}ran{SHER}: Translating Knowledge Graph Embedding with Hyper-Ellipsoidal Restriction\",\n author = \"Li, Yizhi and\n Fan, Wei and\n Liu, Chao and\n Lin, Chenghua and\n Qian, Jiang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.583/\",\n doi = \"10.18653/v1/2022.emnlp-main.583\",\n pages = \"8517--8528\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.583.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.583/", + "pdf_size": 452180, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=451866560951309274&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, The University of Sheffield, UK; Department of Computer Science, University of Central Florida, USA; Pingan Technology, China; Department of Computer Science, The University of Sheffield, UK; Pingan Technology, China", + "aff_domain": "sheffield.ac.uk;knights.ucf.edu;mail.ustc.edu.cn;sheffield.ac.uk;126.com", + "email": "sheffield.ac.uk;knights.ucf.edu;mail.ustc.edu.cn;sheffield.ac.uk;126.com", + "github": "https://github.com/yizhilll/TranSHER", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;0;2", + "aff_unique_norm": "The University of Sheffield;University of Central Florida;Pingan Technology", + "aff_unique_dep": "Department of Computer Science;Department of Computer Science;", + 
"aff_unique_url": "https://www.sheffield.ac.uk;https://www.ucf.edu;https://www.pingan.com", + "aff_unique_abbr": "Sheffield;UCF;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;2;0;2", + "aff_country_unique": "United Kingdom;United States;China" + }, + { + "id": "2022.findings-emnlp.52", + "title": "TransAdv: A Translation-based Adversarial Learning Framework for Zero-Resource Cross-Lingual Named Entity Recognition", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Zero-Resource Cross-Lingual Named Entity Recognition aims at training an NER model of the target language using only labeled source language data and unlabeled target language data. Existing methods are mainly divided into three categories: model transfer based, data transfer based and knowledge transfer based. Each method has its own disadvantages, and combining more than one of them often leads to better performance. However, the performance of data transfer based methods is often limited by inevitable noise in the translation process. To handle the problem, we propose a framework named TransAdv to mitigate lexical and syntactic errors of word-by-word translated data, better utilizing the data by multi-level adversarial learning and multi-model knowledge distillation. 
Extensive experiments are conducted over 6 target languages with English as the source language, and the results show that TransAdv achieves competitive performance to the state-of-the-art models.", + "author": "Yichun Zhao; Jintao Du; Gongshen Liu; Huijia Zhu", + "authorids": "/y/yichun-zhao/; /j/jintao-du/; /g/gongshen-liu/; /h/huijia-zhu/", + "bibtex": "@inproceedings{zhao-etal-2022-transadv,\n title = \"{T}rans{A}dv: A Translation-based Adversarial Learning Framework for Zero-Resource Cross-Lingual Named Entity Recognition\",\n author = \"Zhao, Yichun and\n Du, Jintao and\n Liu, Gongshen and\n Zhu, Huijia\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.52/\",\n doi = \"10.18653/v1/2022.findings-emnlp.52\",\n pages = \"742--749\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.52.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.52/", + "pdf_size": 419084, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17443663695409557939&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "", + "project": "", + "author_num": 4 + }, + { + "id": "2022.findings-emnlp.513", + "title": "TransLIST: A Transformer-Based Linguistically Informed Sanskrit Tokenizer", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Sanskrit Word Segmentation (SWS) is essential in making digitized texts available and in deploying downstream tasks. It is, however, non-trivial because of the sandhi phenomenon that modifies the characters at the word boundaries, and needs special treatment. 
Existing lexicon driven approaches for SWS make use of Sanskrit Heritage Reader, a lexicon-driven shallow parser, to generate the complete candidate solution space, over which various methods are applied to produce the most valid solution. However, these approaches fail while encountering out-of-vocabulary tokens. On the other hand, purely engineering methods for SWS have made use of recent advances in deep learning, but cannot make use of the latent word information on availability. To mitigate the shortcomings of both families of approaches, we propose Transformer based Linguistically Informed Sanskrit Tokenizer (TransLIST) consisting of (1) a module that encodes the character input along with latent-word information, which takes into account the sandhi phenomenon specific to SWS and is apt to work with partial or no candidate solutions, (2) a novel soft-masked attention to prioritize potential candidate words and (3) a novel path ranking algorithm to rectify the corrupted predictions. 
Experiments on the benchmark datasets for SWS show that TransLIST outperforms the current state-of-the-art system by an average 7.2 points absolute gain in terms of perfect match (PM) metric.", + "author": "Jivnesh Sandhan; Rathin Singha; Narein Rao; Suvendu Samanta; Laxmidhar Behera; Pawan Goyal", + "authorids": "/j/jivnesh-sandhan/; /r/rathin-singha/; /n/narein-rao/; /s/suvendu-samanta/; /l/laxmidhar-behera/; /p/pawan-goyal/", + "bibtex": "@inproceedings{sandhan-etal-2022-translist,\n title = \"{T}rans{LIST}: A Transformer-Based Linguistically Informed {S}anskrit Tokenizer\",\n author = \"Sandhan, Jivnesh and\n Singha, Rathin and\n Rao, Narein and\n Samanta, Suvendu and\n Behera, Laxmidhar and\n Goyal, Pawan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.513/\",\n doi = \"10.18653/v1/2022.findings-emnlp.513\",\n pages = \"6902--6912\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.513.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.513/", + "pdf_size": 1225797, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3590472580954191007&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "IIT Kanpur; UCLA; IIT Kanpur; IIT Kanpur; IIT Kanpur+IIT Mandi; IIT Kharagpur", + "aff_domain": "iitk.ac.in;g.ucla.edu;iitk.ac.in; ; ;cse.iitkgp.ac.in", + "email": "iitk.ac.in;g.ucla.edu;iitk.ac.in; ; ;cse.iitkgp.ac.in", + "github": "https://github.com/rsingha108/TransLIST", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;0;0;0+2;3", + "aff_unique_norm": "Indian Institute of Technology Kanpur;University of California, Los Angeles;Indian Institute of Technology Mandi;Indian Institute of 
Technology Kharagpur", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.iitk.ac.in;https://www.ucla.edu;https://www.iitmandi.ac.in;https://www.iitkgp.ac.in", + "aff_unique_abbr": "IITK;UCLA;IIT Mandi;IIT KGP", + "aff_campus_unique_index": "0;1;0;0;0+2;3", + "aff_campus_unique": "Kanpur;Los Angeles;Mandi;Kharagpur", + "aff_country_unique_index": "0;1;0;0;0+0;0", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.emnlp-main.169", + "title": "Transfer Learning from Semantic Role Labeling to Event Argument Extraction with Template-based Slot Querying", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this work, we investigate transfer learning from semantic role labeling (SRL) to event argument extraction (EAE), considering their similar argument structures. We view the extraction task as a role querying problem, unifying various methods into a single framework. There are key discrepancies on role labels and distant arguments between semantic role and event argument annotations. To mitigate these discrepancies, we specify natural language-like queries to tackle the label mismatch problem and devise argument augmentation to recover distant arguments. We show that SRL annotations can serve as a valuable resource for EAE, and a template-based slot querying strategy is especially effective for facilitating the transfer. In extensive evaluations on two English EAE benchmarks, our proposed model obtains impressive zero-shot results by leveraging SRL annotations, reaching nearly 80% of the fullysupervised scores. It further provides benefits in low-resource cases, where few EAE annotations are available. 
Moreover, we show that our approach generalizes to cross-domain and multilingual scenarios.", + "author": "Zhisong Zhang; Emma Strubell; Eduard Hovy", + "authorids": "/z/zhisong-zhang/; /e/emma-strubell/; /e/eduard-hovy/", + "bibtex": "@inproceedings{zhang-etal-2022-transfer,\n title = \"Transfer Learning from Semantic Role Labeling to Event Argument Extraction with Template-based Slot Querying\",\n author = \"Zhang, Zhisong and\n Strubell, Emma and\n Hovy, Eduard\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.169/\",\n doi = \"10.18653/v1/2022.emnlp-main.169\",\n pages = \"2627--2647\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.169.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.169/", + "pdf_size": 644053, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12062629943487660009&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University; Language Technologies Institute, Carnegie Mellon University", + "aff_domain": "cs.cmu.edu;cmu.edu;cmu.edu", + "email": "cs.cmu.edu;cmu.edu;cmu.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Carnegie Mellon University", + "aff_unique_dep": "Language Technologies Institute", + "aff_unique_url": "https://www.cmu.edu", + "aff_unique_abbr": "CMU", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Pittsburgh", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.413", + "title": "Transfer Learning 
with Synthetic Corpora for Spatial Role Labeling and Reasoning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent research shows synthetic data as a source of supervision helps pretrained language models (PLM) transfer learning to new target tasks/domains. However, this idea is less explored for spatial language. We provide two new data resources on multiple spatial language processing tasks. The first dataset is synthesized for transfer learning on spatial question answering (SQA) and spatial role labeling (SpRL). Compared to previous SQA datasets, we include a larger variety of spatial relation types and spatial expressions. Our data generation process is easily extendable with new spatial expression lexicons. The second one is a real-world SQA dataset with human-generated questions built on an existing corpus with SPRL annotations. This dataset can be used to evaluate spatial language processing models in realistic situations. We show pretraining with automatically generated data significantly improves the SOTA results on several SQA and SPRL benchmarks, particularly when the training data in the target domain is small.", + "author": "Roshanak Mirzaee; Parisa Kordjamshidi", + "authorids": "/r/roshanak-mirzaee/; /p/parisa-kordjamshidi/", + "bibtex": "@inproceedings{mirzaee-kordjamshidi-2022-transfer,\n title = \"Transfer Learning with Synthetic Corpora for Spatial Role Labeling and Reasoning\",\n author = \"Mirzaee, Roshanak and\n Kordjamshidi, Parisa\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.413/\",\n doi = \"10.18653/v1/2022.emnlp-main.413\",\n pages = \"6148--6165\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.413.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.413/", + "pdf_size": 1452978, + "gs_citation": 29, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11409907568760826257&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Michigan State University; Michigan State University", + "aff_domain": "msu.edu;msu.edu", + "email": "msu.edu;msu.edu", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Michigan State University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.msu.edu", + "aff_unique_abbr": "MSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.3", + "title": "Transformer Feed-Forward Layers Build Predictions by Promoting Concepts in the Vocabulary Space", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Transformer-based language models (LMs) are at the core of modern NLP, but their internal prediction construction process is opaque and largely not understood. In this work, we make a substantial step towards unveiling this underlying prediction process, by reverse-engineering the operation of the feed-forward network (FFN) layers, one of the building blocks of transformer models. We view the token representation as a changing distribution over the vocabulary, and the output from each FFN layer as an additive update to that distribution. Then, we analyze the FFN updates in the vocabulary space, showing that each update can be decomposed to sub-updates corresponding to single FFN parameter vectors, each promoting concepts that are often human-interpretable. 
We then leverage these findings for controlling LM predictions, where we reduce the toxicity of GPT2 by almost 50%, and for improving computation efficiency with a simple early exit rule, saving 20% of computation on average.", + "author": "Mor Geva; Avi Caciularu; Kevin Wang; Yoav Goldberg", + "authorids": "/m/mor-geva/; /a/avi-caciularu/; /k/kevin-wang/; /y/yoav-goldberg/", + "bibtex": "@inproceedings{geva-etal-2022-transformer,\n title = \"Transformer Feed-Forward Layers Build Predictions by Promoting Concepts in the Vocabulary Space\",\n author = \"Geva, Mor and\n Caciularu, Avi and\n Wang, Kevin and\n Goldberg, Yoav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.3/\",\n doi = \"10.18653/v1/2022.emnlp-main.3\",\n pages = \"30--45\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.3.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.3/", + "pdf_size": 590926, + "gs_citation": 328, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6973206644927132107&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Allen Institute for AI+Bar-Ilan University; Bar-Ilan University; Independent Researcher; Allen Institute for AI+Bar-Ilan University", + "aff_domain": "allenai.org;gmail.com;gmail.com;gmail.com", + "email": "allenai.org;gmail.com;gmail.com;gmail.com", + "github": "https://github.com/aviclu/ffn-values", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1;2;0+1", + "aff_unique_norm": "Allen Institute for AI;Bar-Ilan University;Independent Researcher", + "aff_unique_dep": ";;", + "aff_unique_url": "https://allenai.org;https://www.biu.ac.il;", + "aff_unique_abbr": "AI2;BIU;", + 
"aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1;1;0+1", + "aff_country_unique": "United States;Israel;" + }, + { + "id": "2022.findings-emnlp.99", + "title": "Transformer Language Models without Positional Encodings Still Learn Positional Information", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Causal transformer language models (LMs), such as GPT-3, typically require some form of positional encoding, such as positional embeddings. However, we show that LMs without any explicit positional encoding are still competitive with standard models and that this phenomenon is robust across different datasets, model sizes, and sequence lengths.Probing experiments reveal that such models acquire an implicit notion of absolute positions throughout the network, effectively compensating for the missing information.We conjecture that causal attention enables the model to infer the number of predecessors that each token can attend to, thereby approximating its absolute position.Our findings indicate that causal LMs might derive positional awareness not only from the explicit positioning mechanism but also from the effects of the causal mask.", + "author": "Adi Haviv; Ori Ram; Ofir Press; Peter Izsak; Omer Levy", + "authorids": "/a/adi-haviv/; /o/ori-ram/; /o/ofir-press/; /p/peter-izsak/; /o/omer-levy/", + "bibtex": "@inproceedings{haviv-etal-2022-transformer,\n title = \"Transformer Language Models without Positional Encodings Still Learn Positional Information\",\n author = \"Haviv, Adi and\n Ram, Ori and\n Press, Ofir and\n Izsak, Peter and\n Levy, Omer\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.findings-emnlp.99/\",\n doi = \"10.18653/v1/2022.findings-emnlp.99\",\n pages = \"1382--1390\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.99.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.99/", + "pdf_size": 245627, + "gs_citation": 119, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14304956244977218000&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Tel Aviv University; Tel Aviv University; University of Washington; Intel Labs; Meta AI", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;2;3", + "aff_unique_norm": "Tel Aviv University;University of Washington;Intel Corporation;Meta Platforms, Inc.", + "aff_unique_dep": ";;Intel Labs;Meta AI", + "aff_unique_url": "https://www.tau.ac.il;https://www.washington.edu;https://www.intel.com;https://meta.com", + "aff_unique_abbr": "TAU;UW;Intel;Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;1;1", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.emnlp-main.402", + "title": "Transformer-based Entity Typing in Knowledge Graphs", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We investigate the knowledge graph entity typing task which aims at inferring plausible entity types. In this paper, we propose a novel Transformer-based Entity Typing (TET) approach, effectively encoding the content of neighbours of an entity by means of a transformer mechanism. 
More precisely, TET is composed of three different mechanisms: a local transformer allowing to infer missing entity types by independently encoding the information provided by each of its neighbours; a global transformer aggregating the information of all neighbours of an entity into a single long sequence to reason about more complex entity types; and a context transformer integrating neighbours content in a differentiated way through information exchange between neighbour pairs, while preserving the graph structure. Furthermore, TET uses information about class membership of types to semantically strengthen the representation of an entity. Experiments on two real-world datasets demonstrate the superior performance of TET compared to the state-of-the-art.", + "author": "Zhiwei Hu; Victor Gutierrez-Basulto; Zhiliang Xiang; Ru Li; Jeff Pan", + "authorids": "/z/zhiwei-hu/; /v/victor-gutierrez-basulto/; /z/zhiliang-xiang/; /r/ru-li/; /j/jeff-pan/", + "bibtex": "@inproceedings{hu-etal-2022-transformer,\n title = \"Transformer-based Entity Typing in Knowledge Graphs\",\n author = \"Hu, Zhiwei and\n Gutierrez-Basulto, Victor and\n Xiang, Zhiliang and\n Li, Ru and\n Pan, Jeff\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.402/\",\n doi = \"10.18653/v1/2022.emnlp-main.402\",\n pages = \"5988--6001\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.402.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.402/", + "pdf_size": 749047, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8274842296384565596&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "School of Computer and Information 
Technology, Shanxi University, China; School of Computer Science and Informatics, Cardiff University, UK; School of Computer Science and Informatics, Cardiff University, UK; School of Computer and Information Technology, Shanxi University, China + ILCC, School of Informatics, University of Edinburgh, UK; ILCC, School of Informatics, University of Edinburgh, UK", + "aff_domain": "whu.edu.cn;cardiff.ac.uk;cardiff.ac.uk;sxu.edu.cn;ed.ac.uk", + "email": "whu.edu.cn;cardiff.ac.uk;cardiff.ac.uk;sxu.edu.cn;ed.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;0+2;2", + "aff_unique_norm": "Shanxi University;Cardiff University;University of Edinburgh", + "aff_unique_dep": "School of Computer and Information Technology;School of Computer Science and Informatics;School of Informatics", + "aff_unique_url": "http://www.sxu.edu.cn;https://www.cardiff.ac.uk;https://www.ed.ac.uk", + "aff_unique_abbr": ";Cardiff;Edinburgh", + "aff_campus_unique_index": "1;1;2;2", + "aff_campus_unique": ";Cardiff;Edinburgh", + "aff_country_unique_index": "0;1;1;0+1;1", + "aff_country_unique": "China;United Kingdom" + }, + { + "id": "2022.emnlp-main.813", + "title": "Transforming Sequence Tagging Into A Seq2Seq Task", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pretrained, large, generative language models (LMs) have had great success in a wide range of sequence tagging and structured prediction tasks. Casting a sequence tagging task as a Seq2Seq one requires deciding the formats of the input and output sequences. However, we lack a principled understanding of the trade-offs associated with these formats (such as the effect on model accuracy, sequence length, multilingual generalization, hallucination). In this paper, we rigorously study different formats one could use for casting input text sentences and their output labels into the input and target (i.e., output) of a Seq2Seq model. 
Along the way, we introduce a new format, which we show to to be both simpler and more effective. Additionally the new format demonstrates significant gains in the multilingual settings \u2013 both zero-shot transfer learning and joint training. Lastly, we find that the new format is more robust and almost completely devoid of hallucination \u2013 an issue we find common in existing formats. With well over a 1000 experiments studying 14 different formats, over 7 diverse public benchmarks \u2013 including 3 multilingual datasets spanning 7 languages \u2013 we believe our findings provide a strong empirical basis in understanding how we should tackle sequence tagging tasks.", + "author": "Karthik Raman; Iftekhar Naim; Jiecao Chen; Kazuma Hashimoto; Kiran Yalasangi; Krishna Srinivasan", + "authorids": "/k/karthik-raman/; /i/iftekhar-naim/; /j/jiecao-chen/; /k/kazuma-hashimoto/; /k/kiran-yalasangi/; /k/krishna-srinivasan/", + "bibtex": "@inproceedings{raman-etal-2022-transforming,\n title = \"Transforming Sequence Tagging Into A {S}eq2{S}eq Task\",\n author = \"Raman, Karthik and\n Naim, Iftekhar and\n Chen, Jiecao and\n Hashimoto, Kazuma and\n Yalasangi, Kiran and\n Srinivasan, Krishna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.813/\",\n doi = \"10.18653/v1/2022.emnlp-main.813\",\n pages = \"11856--11874\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.813.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.813/", + "pdf_size": 286365, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7247243155142909766&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;;", + 
"aff_domain": ";;;;;", + "email": ";;;;;", + "github": "", + "project": "", + "author_num": 6 + }, + { + "id": "2022.findings-emnlp.91", + "title": "Translating Hanja Historical Documents to Contemporary Korean and English", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The Annals of Joseon Dynasty (AJD) contain the daily records of the Kings of Joseon, the 500-year kingdom preceding the modern nation of Korea.The Annals were originally written in an archaic Korean writing system, \u2018Hanja\u2019, and were translated into Korean from 1968 to 1993.The resulting translation was however too literal and contained many archaic Korean words; thus, a new expert translation effort began in 2012. Since then, the records of only one king have been completed in a decade.In parallel, expert translators are working on English translation, also at a slow pace and produced only one king\u2019s records in English so far.Thus, we propose H2KE, a neural machine translation model, that translates historical documents in Hanja to more easily understandable Korean and to English.Built on top of multilingual neural machine translation, H2KE learns to translate a historical document written in Hanja, from both a full dataset of outdated Korean translation and a small dataset of more recently translated contemporary Korean and English.We compare our method against two baselines:a recent model that simultaneously learns to restore and translate Hanja historical documentand a Transformer based model trained only on newly translated corpora.The experiments reveal that our method significantly outperforms the baselines in terms of BLEU scores for both contemporary Korean and English translations.We further conduct extensive human evaluation which shows that our translation is preferred over the original expert translations by both experts and non-expert Korean speakers.", + "author": "Juhee Son; Jiho Jin; Haneul Yoo; JinYeong Bak; Kyunghyun Cho; Alice Oh", + 
"authorids": "/j/juhee-son/; /j/jiho-jin/; /h/haneul-yoo/; /j/jinyeong-bak/; /k/kyunghyun-cho/; /a/alice-oh/", + "bibtex": "@inproceedings{son-etal-2022-translating,\n title = \"Translating Hanja Historical Documents to Contemporary {K}orean and {E}nglish\",\n author = \"Son, Juhee and\n Jin, Jiho and\n Yoo, Haneul and\n Bak, JinYeong and\n Cho, Kyunghyun and\n Oh, Alice\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.91/\",\n doi = \"10.18653/v1/2022.findings-emnlp.91\",\n pages = \"1260--1272\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.91.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.91/", + "pdf_size": 2470826, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3782342395519871879&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 8, + "aff": "KAIST; KAIST; KAIST; Sungkyunkwan University; New York University+Genentech; KAIST", + "aff_domain": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;skku.edu;nyu.edu;kaist.edu", + "email": "kaist.ac.kr;kaist.ac.kr;kaist.ac.kr;skku.edu;nyu.edu;kaist.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;2+3;0", + "aff_unique_norm": "Korea Advanced Institute of Science and Technology;Sungkyunkwan University;New York University;Genentech", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.kaist.ac.kr;https://www.skku.edu;https://www.nyu.edu;https://www.genentech.com", + "aff_unique_abbr": "KAIST;SKKU;NYU;Genentech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1+1;0", + "aff_country_unique": "South Korea;United States" + }, + { + "id": "2022.emnlp-main.26", + 
"title": "Translation between Molecules and Natural Language", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We present MolT5 - a self-supervised learning framework for pretraining models on a vast amount of unlabeled natural language text and molecule strings. MolT5 allows for new, useful, and challenging analogs of traditional vision-language tasks, such as molecule captioning and text-based de novo molecule generation (altogether: translation between molecules and language), which we explore for the first time. Since MolT5 pretrains models on single-modal data, it helps overcome the chemistry domain shortcoming of data scarcity. Furthermore, we consider several metrics, including a new cross-modal embedding-based metric, to evaluate the tasks of molecule captioning and text-based molecule generation. Our results show that MolT5-based models are able to generate outputs, both molecules and captions, which in many cases are high quality.", + "author": "Carl Edwards; Tuan Lai; Kevin Ros; Garrett Honke; Kyunghyun Cho; Heng Ji", + "authorids": "/c/carl-edwards/; /t/tuan-lai/; /k/kevin-ros/; /g/garrett-honke/; /k/kyunghyun-cho/; /h/heng-ji/", + "bibtex": "@inproceedings{edwards-etal-2022-translation,\n title = \"Translation between Molecules and Natural Language\",\n author = \"Edwards, Carl and\n Lai, Tuan and\n Ros, Kevin and\n Honke, Garrett and\n Cho, Kyunghyun and\n Ji, Heng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.26/\",\n doi = \"10.18653/v1/2022.emnlp-main.26\",\n pages = \"375--413\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.26.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.26/", 
+ "pdf_size": 2781620, + "gs_citation": 225, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3774217736601927222&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": "University of Illinois Urbana-Champaign; University of Illinois Urbana-Champaign + X, the Moonshot Factory; University of Illinois Urbana-Champaign; X, the Moonshot Factory; New York University + Genentech; University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;google.com;nyu.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;google.com;nyu.edu;illinois.edu", + "github": "github.com/blender-nlp/MolT5", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0;1;2+3;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;X Development LLC;New York University;Genentech", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://illinois.edu;https://xdevllc.com;https://www.nyu.edu;https://www.genentech.com", + "aff_unique_abbr": "UIUC;X;NYU;Genentech", + "aff_campus_unique_index": "0;0;0;;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0+0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.476", + "title": "Trial2Vec: Zero-Shot Clinical Trial Document Similarity Search using Self-Supervision", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Clinical trials are essential for drug development but are extremely expensive and time-consuming to conduct. It is beneficial to study similar historical trials when designing a clinical trial. However, lengthy trial documents and lack of labeled data make trial similarity search difficult. We propose a zero-shotclinical trial retrieval method, called Trial2Vec, which learns through self-supervision without the need for annotating similar clinical trials. 
Specifically, the meta-structure of trial documents (e.g., title, eligibility criteria, target disease) along with clinical knowledge (e.g., UMLS knowledge base) are leveraged to automatically generate contrastive samples. Besides, encodes trial documents considering meta-structure thus producing compact embeddings aggregating multi-aspect information from the whole document. We show that our method yields medically interpretable embeddings by visualization and it gets 15% average improvement over the best baselines on precision/recall for trial retrieval, which is evaluated on our labeled 1600 trial pairs. In addition, we prove the pretrained embeddings benefit the downstream trial outcome prediction task over 240k trials. Software is available at https://github.com/RyanWangZf/Trial2Vec.", + "author": "Zifeng Wang; Jimeng Sun", + "authorids": "/z/zifeng-wang/; /j/jimeng-sun/", + "bibtex": "@inproceedings{wang-sun-2022-trial2vec,\n title = \"{T}rial2{V}ec: Zero-Shot Clinical Trial Document Similarity Search using Self-Supervision\",\n author = \"Wang, Zifeng and\n Sun, Jimeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.476/\",\n doi = \"10.18653/v1/2022.findings-emnlp.476\",\n pages = \"6377--6390\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.476.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.476/", + "pdf_size": 499101, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6695818200976123683&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science, University of Illinois Urbana-Champaign + Carle Illinois College of Medicine, University of Illinois 
Urbana-Champaign; Department of Computer Science, University of Illinois Urbana-Champaign + Carle Illinois College of Medicine, University of Illinois Urbana-Champaign", + "aff_domain": "illinois.edu;illinois.edu", + "email": "illinois.edu;illinois.edu", + "github": "https://github.com/RyanWangZf/Trial2Vec", + "project": "", + "author_num": 2, + "aff_unique_index": "0+0;0+0", + "aff_unique_norm": "University of Illinois Urbana-Champaign", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://illinois.edu", + "aff_unique_abbr": "UIUC", + "aff_campus_unique_index": "0+0;0+0", + "aff_campus_unique": "Urbana-Champaign", + "aff_country_unique_index": "0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.249", + "title": "Truncation Sampling as Language Model Desmoothing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Long samples of text from neural language models can be of poor quality. Truncation sampling algorithms\u2013like top-p or top-k\u2014address this by setting some words\u2019 probabilities to zero at each step. This work investigates why these methods are important, and how to improve them. We propose thinking of a neural language model as a mixture of a true distribution and a smoothing distribution that avoids infinite perplexity. In this light, truncation algorithms aim to perform desmoothing, estimating a subset of the support of the true distribution. Finding a good subset is crucial: we show that top-p unnecessarily truncates high-probability words, for example causing it to truncate all words but Trump for a document that starts with Donald. We introduce eta-sampling, which truncates words below an entropy-dependent probability threshold. 
Compared to previous algorithms, our eta-sampling generates more plausible long documents according to humans, is better at breaking out of repetition, and behaves more reasonably on a battery of test distributions.", + "author": "John Hewitt; Christopher Manning; Percy Liang", + "authorids": "/j/john-hewitt/; /c/christopher-d-manning/; /p/percy-liang/", + "bibtex": "@inproceedings{hewitt-etal-2022-truncation,\n title = \"Truncation Sampling as Language Model Desmoothing\",\n author = \"Hewitt, John and\n Manning, Christopher and\n Liang, Percy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.249/\",\n doi = \"10.18653/v1/2022.findings-emnlp.249\",\n pages = \"3414--3427\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.249.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.249/", + "pdf_size": 1127698, + "gs_citation": 70, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5219749096383721246&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Department of Computer Science, Stanford University; Department of Computer Science, Stanford University; Department of Computer Science, Stanford University", + "aff_domain": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "email": "cs.stanford.edu;cs.stanford.edu;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "Stanford University", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.stanford.edu", + "aff_unique_abbr": "Stanford", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Stanford", + "aff_country_unique_index": "0;0;0", + 
"aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.167", + "title": "Turning Fixed to Adaptive: Integrating Post-Evaluation into Simultaneous Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Simultaneous machine translation (SiMT) starts its translation before reading the whole source sentence and employs either fixed or adaptive policy to generate the target sentence. Compared to the fixed policy, the adaptive policy achieves better latency-quality tradeoffs by adopting a flexible translation policy. If the policy can evaluate rationality before taking action, the probability of incorrect actions will also decrease. However, previous methods lack evaluation of actions before taking them. In this paper, we propose a method of performing the adaptive policy via integrating post-evaluation into the fixed policy. Specifically, whenever a candidate token is generated, our model will evaluate the rationality of the next action by measuring the change in the source content. Our model will then take different actions based on the evaluation results. 
Experiments on three translation tasks show that our method can exceed strong baselines under all latency.", + "author": "Shoutao Guo; Shaolei Zhang; Yang Feng", + "authorids": "/s/shoutao-guo/; /s/shaolei-zhang/; /y/yang-feng/", + "bibtex": "https://aclanthology.org/2022.findings-emnlp.167.bib", + "pdf": "https://aclanthology.org/2022.findings-emnlp.167.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.167/", + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17418067581896799357&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": ";;", + "aff_domain": ";;", + "email": ";;", + "github": "", + "project": "", + "author_num": 3 + }, + { + "id": "2022.emnlp-main.498", + "title": "Tutoring Helps Students Learn Better: Improving Knowledge Distillation for BERT with Tutor Network", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models have achieved remarkable successes in natural language processing tasks, coming at the cost of increasing model size. To address this issue, knowledge distillation (KD) has been widely applied to compress language models. However, typical KD approaches for language models have overlooked the difficulty of training examples, suffering from incorrect teacher prediction transfer and sub-efficient training. In this paper, we propose a novel KD framework, Tutor-KD, which improves the distillation effectiveness by controlling the difficulty of training examples during pre-training. We introduce a tutor network that generates samples that are easy for the teacher but difficult for the student, with training on a carefully designed policy gradient method. 
Experimental results show that Tutor-KD significantly and consistently outperforms the state-of-the-art KD methods with variously sized student models on the GLUE benchmark, demonstrating that the tutor can effectively generate training examples for the student.", + "author": "Junho Kim; Jun-Hyung Park; Mingyu Lee; Wing-Lam Mok; Joon-Young Choi; SangKeun Lee", + "authorids": "/j/junho-kim/; /j/jun-hyung-park/; /m/mingyu-lee/; /w/wing-lam-mok/; /j/joon-young-choi/; /s/sangkeun-lee/", + "bibtex": "@inproceedings{kim-etal-2022-tutoring,\n title = \"Tutoring Helps Students Learn Better: Improving Knowledge Distillation for {BERT} with Tutor Network\",\n author = \"Kim, Junho and\n Park, Jun-Hyung and\n Lee, Mingyu and\n Mok, Wing-Lam and\n Choi, Joon-Young and\n Lee, SangKeun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.498/\",\n doi = \"10.18653/v1/2022.emnlp-main.498\",\n pages = \"7371--7382\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.498.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.498/", + "pdf_size": 914020, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3646880867282004642&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Department of Artificial Intelligence+Department of Computer Science and Engineering, Korea University; Department of Artificial Intelligence+Department of Computer Science and Engineering, Korea University; Department of Artificial Intelligence, Korea University; Department of Artificial Intelligence, Korea University; Department of Artificial Intelligence, Korea University; Department of Artificial Intelligence+Department of Computer 
Science and Engineering, Korea University", + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "email": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr", + "github": "https://github.com/JunhoKim94/TutorKD/", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;0+1;1;1;1;0+1", + "aff_unique_norm": "Department of Artificial Intelligence;Korea University", + "aff_unique_dep": "Artificial Intelligence;Department of Computer Science and Engineering", + "aff_unique_url": ";https://www.korea.ac.kr", + "aff_unique_abbr": ";KU", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "1;1;1;1;1;1", + "aff_country_unique": ";South Korea" + }, + { + "id": "2022.findings-emnlp.471", + "title": "Tweet Based Reach Aware Temporal Attention Network for NFT Valuation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Non-Fungible Tokens (NFTs) are a relatively unexplored class of assets. Designing strategies to forecast NFT trends is an intricate task due to its extremely volatile nature. The market is largely driven by public sentiment and \u201chype\u201d, which in turn has a high correlation with conversations taking place on social media platforms like Twitter. Prior work done for modelling stock market data does not take into account the extent of impact certain highly influential tweets and their authors can have on the market. Building on these limitations and the nature of the NFT market, we propose a novel reach-aware temporal learning approach to make predictions for forecasting future trends in the NFT market. We perform experiments on a new dataset consisting of over 1.3 million tweets and 180 thousand NFT transactions spanning over 15 NFT collections curated by us. Our model (TA-NFT) outperforms other state-of-the-art methods by an average of 36%. 
Through extensive quantitative and ablative analysis, we demonstrate the ability of our approach as a practical method for predicting NFT trends.", + "author": "Ramit Sawhney; Megh Thakkar; Ritesh Soun; Atula Neerkaje; Vasu Sharma; Dipanwita Guhathakurta; Sudheer Chava", + "authorids": "/r/ramit-sawhney/; /m/megh-thakkar/; /r/ritesh-soun/; /a/atula-neerkaje/; /v/vasu-sharma/; /d/dipanwita-guhathakurta/; /s/sudheer-chava/", + "bibtex": "@inproceedings{sawhney-etal-2022-tweet,\n title = \"Tweet Based Reach Aware Temporal Attention Network for {NFT} Valuation\",\n author = \"Sawhney, Ramit and\n Thakkar, Megh and\n Soun, Ritesh and\n Neerkaje, Atula and\n Sharma, Vasu and\n Guhathakurta, Dipanwita and\n Chava, Sudheer\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.471/\",\n doi = \"10.18653/v1/2022.findings-emnlp.471\",\n pages = \"6321--6332\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.471.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.471/", + "pdf_size": 1692160, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15525241892552269789&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Georgia Tech; BITS, Pilani; SVC, DU; MIT, Manipal; Amazon Science; IIIT, Hyderabad; Georgia Tech", + "aff_domain": "gatech.edu; ; ; ; ; ;scheller.gatech.edu", + "email": "gatech.edu; ; ; ; ; ;scheller.gatech.edu", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;2;3;4;5;0", + "aff_unique_norm": "Georgia Institute of Technology;Birla Institute of Technology and Science;Duke University;Manipal Institute of Technology;Amazon;International Institute of Information 
Technology", + "aff_unique_dep": ";;Statistics and Data Science;;Amazon Science;", + "aff_unique_url": "https://www.gatech.edu;https://www.bits-pilani.ac.in;https://www.duke.edu;https://mit manipal.edu;https://www.amazon.science;https://iiit Hyderabad.ac.in", + "aff_unique_abbr": "Georgia Tech;BITS Pilani;Duke;MIT Manipal;Amazon Science;IIIT-H", + "aff_campus_unique_index": "1;2;3", + "aff_campus_unique": ";Pilani;Manipal;Hyderabad", + "aff_country_unique_index": "0;1;0;1;0;1;0", + "aff_country_unique": "United States;India" + }, + { + "id": "2022.emnlp-main.326", + "title": "Twist Decoding: Diverse Generators Guide Each Other", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Many language generation models are now available for a wide range of generation tasks, including machine translation and summarization. Combining such diverse models may lead to further progress, but ensembling generation models is challenging during inference: conventional ensembling methods (e.g., shallow fusion) require that the models share vocabulary/tokenization schemes. We introduce Twist decoding, a simple and general text generation algorithm that benefits from diverse models at inference time. Our method does not assume the vocabulary, tokenization or even generation order is shared. Our extensive evaluations on machine translation and scientific paper summarization demonstrate that Twist decoding substantially outperforms each model decoded in isolation over various scenarios, including cases where domain-specific and general-purpose models are both available. Twist decoding also consistently outperforms the popular reranking heuristic where output candidates from one model are rescored by another. 
We hope that our work will encourage researchers and practitioners to examine generation models collectively, not just independently, and to seek out models with complementary strengths to the currently available models.", + "author": "Jungo Kasai; Keisuke Sakaguchi; Ronan Le Bras; Hao Peng; Ximing Lu; Dragomir Radev; Yejin Choi; Noah A. Smith", + "authorids": "/j/jungo-kasai/; /k/keisuke-sakaguchi/; /r/ronan-le-bras/; /h/hao-peng/; /x/ximing-lu/; /d/dragomir-radev/; /y/yejin-choi/; /n/noah-a-smith/", + "bibtex": "@inproceedings{kasai-etal-2022-twist,\n title = \"Twist Decoding: Diverse Generators Guide Each Other\",\n author = \"Kasai, Jungo and\n Sakaguchi, Keisuke and\n Le Bras, Ronan and\n Peng, Hao and\n Lu, Ximing and\n Radev, Dragomir and\n Choi, Yejin and\n Smith, Noah A.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.326/\",\n doi = \"10.18653/v1/2022.emnlp-main.326\",\n pages = \"4909--4923\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.326.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.326/", + "pdf_size": 890840, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10353611046601581594&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2663; Allen Institute for AI\u2663\u2662; Tohoku University\u2662; Department of Computer Science, Yale University\u2660; Paul G. Allen School of Computer Science & Engineering, University of Washington\u2661\u2663; Allen Institute for AI\u2661\u2663; Paul G. 
Allen School of Computer Science & Engineering, University of Washington\u2661\u2663; Allen Institute for AI\u2661\u2663", + "aff_domain": "cs.washington.edu;tohoku.ac.jp;allenai.org;allenai.org;cs.washington.edu;yale.edu;cs.washington.edu;cs.washington.edu", + "email": "cs.washington.edu;tohoku.ac.jp;allenai.org;allenai.org;cs.washington.edu;yale.edu;cs.washington.edu;cs.washington.edu", + "github": "https://github.com/jungokasai/twist_decoding", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;0;1;0;1", + "aff_unique_norm": "University of Washington;Allen Institute for AI;Tohoku University;Yale University", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;;;Department of Computer Science", + "aff_unique_url": "https://www.cs.washington.edu;https://allenai.org;https://www.tohoku.ac.jp;https://www.yale.edu", + "aff_unique_abbr": "UW;AI2;Tohoku U;Yale", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Seattle;", + "aff_country_unique_index": "0;0;1;0;0;0;0;0", + "aff_country_unique": "United States;Japan" + }, + { + "id": "2022.emnlp-main.691", + "title": "Two is Better than Many? Binary Classification as an Effective Approach to Multi-Choice Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a simple refactoring of multi-choice question answering (MCQA) tasks as a series of binary classifications. The MCQA task is generally performed by scoring each (question, answer) pair normalized over all the pairs, and then selecting the answer from the pair that yield the highest score. For n answer choices, this is equivalent to an n-class classification setup where only one class (true answer) is correct. We instead show that classifying (question, true answer) as positive instances and (question, false answer) as negative instances is significantly more effective across various models and datasets. 
We show the efficacy of our proposed approach in different tasks \u2013 abductive reasoning, commonsense question answering, science question answering, and sentence completion. Our DeBERTa binary classification model reaches the top or close to the top performance on public leaderboards for these tasks. The source code of the proposed approach is available at https://github.com/declare-lab/TEAM.", + "author": "Deepanway Ghosal; Navonil Majumder; Rada Mihalcea; Soujanya Poria", + "authorids": "/d/deepanway-ghosal/; /n/navonil-majumder/; /r/rada-mihalcea/; /s/soujanya-poria/", + "bibtex": "@inproceedings{ghosal-etal-2022-two,\n title = \"Two is Better than Many? Binary Classification as an Effective Approach to Multi-Choice Question Answering\",\n author = \"Ghosal, Deepanway and\n Majumder, Navonil and\n Mihalcea, Rada and\n Poria, Soujanya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.691/\",\n doi = \"10.18653/v1/2022.emnlp-main.691\",\n pages = \"10158--10166\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.691.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.691/", + "pdf_size": 263728, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2825610430003786463&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "DeCLaRe Lab, Singapore University of Technology and Design, Singapore; DeCLaRe Lab, Singapore University of Technology and Design, Singapore; University of Michigan, USA; DeCLaRe Lab, Singapore University of Technology and Design, Singapore", + "aff_domain": "mymail.sutd.edu.sg;sutd.edu.sg;umich.edu;sutd.edu.sg", + "email": 
"mymail.sutd.edu.sg;sutd.edu.sg;umich.edu;sutd.edu.sg", + "github": "https://github.com/declare-lab/TEAM", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;1;0", + "aff_unique_norm": "Singapore University of Technology and Design;University of Michigan", + "aff_unique_dep": "DeCLaRe Lab;", + "aff_unique_url": "https://www.sutd.edu.sg;https://www.umich.edu", + "aff_unique_abbr": "SUTD;UM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;1;0", + "aff_country_unique": "Singapore;United States" + }, + { + "id": "2022.findings-emnlp.420", + "title": "TyDiP: A Dataset for Politeness Classification in Nine Typologically Diverse Languages", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We study politeness phenomena in nine typologically diverse languages. Politeness is an important facet of communication and is sometimes argued to be cultural-specific, yet existing computational linguistic study is limited to English. We create TyDiP, a dataset containing three-way politeness annotations for 500 examples in each language, totaling 4.5K examples. We evaluate how well multilingual models can identify politeness levels \u2013 they show a fairly robust zero-shot transfer ability, yet fall short of estimated human accuracy significantly. We further study mapping the English politeness strategy lexicon into nine languages via automatic translation and lexicon induction, analyzing whether each strategy\u2019s impact stays consistent across languages. Lastly, we empirically study the complicated relationship between formality and politeness through transfer experiments. 
We hope our dataset will support various research questions and applications, from evaluating multilingual models to constructing polite multilingual agents.", + "author": "Anirudh Srinivasan; Eunsol Choi", + "authorids": "/a/anirudh-srinivasan/; /e/eunsol-choi/", + "bibtex": "@inproceedings{srinivasan-choi-2022-tydip,\n title = \"{T}y{D}i{P}: A Dataset for Politeness Classification in Nine Typologically Diverse Languages\",\n author = \"Srinivasan, Anirudh and\n Choi, Eunsol\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.420/\",\n doi = \"10.18653/v1/2022.findings-emnlp.420\",\n pages = \"5723--5738\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.420.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.420/", + "pdf_size": 499465, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10703635978826455808&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, The University of Texas at Austin; Department of Computer Science, The University of Texas at Austin", + "aff_domain": "utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu", + "github": "https://github.com/Genius1237/TyDiP", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "The University of Texas at Austin", + "aff_unique_dep": "Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu", + "aff_unique_abbr": "UT Austin", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Austin", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.429", + "title": "ULN: Towards 
Underspecified Vision-and-Language Navigation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Vision-and-Language Navigation (VLN) is a task to guide an embodied agent moving to a target position using language instructions. Despite the significant performance improvement, the wide use of fine-grained instructions fails to characterize more practical linguistic variations in reality. To fill in this gap, we introduce a new setting, namely Underspecified vision-and-Language Navigation (ULN), and associated evaluation datasets. ULN evaluates agents using multi-level underspecified instructions instead of purely fine-grained or coarse-grained, which is a more realistic and general setting. As a primary step toward ULN, we propose a VLN framework that consists of a classification module, a navigation agent, and an Exploitation-to-Exploration (E2E) module. Specifically, we propose to learn Granularity Specific Sub-networks (GSS) for the agent to ground multi-level instructions with minimal additional parameters. Then, our E2E module estimates grounding uncertainty and conducts multi-step lookahead exploration to improve the success rate further. Experimental results show that existing VLN models are still brittle to multi-level language underspecification. 
Our framework is more robust and outperforms the baselines on ULN by ~10% relative success rate across all levels.", + "author": "Weixi Feng; Tsu-Jui Fu; Yujie Lu; William Yang Wang", + "authorids": "/w/weixi-feng/; /t/tsu-jui-fu/; /y/yujie-lu/; /w/william-yang-wang/", + "bibtex": "@inproceedings{feng-etal-2022-uln,\n title = \"{ULN}: Towards Underspecified Vision-and-Language Navigation\",\n author = \"Feng, Weixi and\n Fu, Tsu-Jui and\n Lu, Yujie and\n Wang, William Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.429/\",\n doi = \"10.18653/v1/2022.emnlp-main.429\",\n pages = \"6394--6412\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.429.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.429/", + "pdf_size": 12752275, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1365557601391205567&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/weixi-feng/ULN", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.110", + "title": "Unbiased and Efficient Sampling of Dependency Trees", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Most computational models of dependency syntax consist of distributions over spanning trees. However, the majority of dependency treebanks require that every valid dependency tree has a single edge coming out of the ROOT node, a constraint that is not part of the definition of spanning trees. 
For this reason all standard inference algorithms for spanning trees are sub-optimal for inference over dependency trees.Zmigrod et al (2021) proposed algorithms for sampling with and without replacement from the dependency tree distribution that incorporate the single-root constraint. In this paper we show that their fastest algorithm for sampling with replacement, Wilson-RC, is in fact producing biased samples and we provide two alternatives that are unbiased. Additionally, we propose two algorithms (one incremental, one parallel) that reduce the asymptotic runtime of algorithm for sampling k trees without replacement to O(kn^3). These algorithms are both asymptotically and practically more efficient.", + "author": "Milo\u0161 Stanojevi\u0107", + "authorids": "/m/milos-stanojevic/", + "bibtex": "@inproceedings{stanojevic-2022-unbiased,\n title = \"Unbiased and Efficient Sampling of Dependency Trees\",\n author = \"Stanojevi{\\'c}, Milo{\\v{s}}\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.110/\",\n doi = \"10.18653/v1/2022.emnlp-main.110\",\n pages = \"1691--1706\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.110.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.110/", + "pdf_size": 665966, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9140115943242536046&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "DeepMind", + "aff_domain": "deepmind.com", + "email": "deepmind.com", + "github": "", + "project": "", + "author_num": 1, + "aff_unique_index": "0", + "aff_unique_norm": "DeepMind", + "aff_unique_dep": "", + "aff_unique_url": "https://deepmind.com", + 
"aff_unique_abbr": "DeepMind", + "aff_country_unique_index": "0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.538", + "title": "Uncertainty Quantification with Pre-trained Language Models: A Large-Scale Empirical Analysis", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained language models (PLMs) have gained increasing popularity due to their compelling prediction performance in diverse natural language processing (NLP) tasks. When formulating a PLM-based prediction pipeline for NLP tasks, it is also crucial for the pipeline to minimize the calibration error, especially in safety-critical applications. That is, the pipeline should reliably indicate when we can trust its predictions. In particular, there are various considerations behind the pipeline: (1) the choice and (2) the size of PLM, (3) the choice of uncertainty quantifier, (4) the choice of fine-tuning loss, and many more. Although prior work has looked into some of these considerations, they usually draw conclusions based on a limited scope of empirical studies. There still lacks a holistic analysis on how to compose a well-calibrated PLM-based prediction pipeline. To fill this void, we compare a wide range of popular options for each consideration based on three prevalent NLP classification tasks and the setting of domain shift. 
In response, we recommend the following: (1) use ELECTRA for PLM encoding, (2) use larger PLMs if possible, (3) use Temp Scaling as the uncertainty quantifier, and (4) use Focal Loss for fine-tuning.", + "author": "Yuxin Xiao; Paul Pu Liang; Umang Bhatt; Willie Neiswanger; Ruslan Salakhutdinov; Louis-Philippe Morency", + "authorids": "/y/yuxin-xiao/; /p/paul-pu-liang/; /u/umang-bhatt/; /w/willie-neiswanger/; /r/ruslan-salakhutdinov/; /l/louis-philippe-morency/", + "bibtex": "@inproceedings{xiao-etal-2022-uncertainty,\n title = \"Uncertainty Quantification with Pre-trained Language Models: A Large-Scale Empirical Analysis\",\n author = \"Xiao, Yuxin and\n Liang, Paul Pu and\n Bhatt, Umang and\n Neiswanger, Willie and\n Salakhutdinov, Ruslan and\n Morency, Louis-Philippe\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.538/\",\n doi = \"10.18653/v1/2022.findings-emnlp.538\",\n pages = \"7273--7284\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.538.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.538/", + "pdf_size": 364349, + "gs_citation": 84, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7698166899882121928&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Massachusetts Institute of Technology; Carnegie Mellon University; University of Cambridge; Stanford University; Carnegie Mellon University; Carnegie Mellon University", + "aff_domain": "mit.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cam.ac.uk;cs.stanford.edu", + "email": "mit.edu;cs.cmu.edu;cs.cmu.edu;cs.cmu.edu;cam.ac.uk;cs.stanford.edu", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;1;1", + "aff_unique_norm": 
"Massachusetts Institute of Technology;Carnegie Mellon University;University of Cambridge;Stanford University", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://web.mit.edu;https://www.cmu.edu;https://www.cam.ac.uk;https://www.stanford.edu", + "aff_unique_abbr": "MIT;CMU;Cambridge;Stanford", + "aff_campus_unique_index": "1;2", + "aff_campus_unique": ";Cambridge;Stanford", + "aff_country_unique_index": "0;0;1;0;0;0", + "aff_country_unique": "United States;United Kingdom" + }, + { + "id": "2022.emnlp-main.266", + "title": "Understanding Jargon: Combining Extraction and Generation for Definition Modeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Can machines know what twin prime is? From the composition of this phrase, machines may guess twin prime is a certain kind of prime, but it is still difficult to deduce exactly what twin stands for without additional knowledge. Here, twin prime is a jargon - a specialized term used by experts in a particular field. Explaining jargon is challenging since it usually requires domain knowledge to understand. Recently, there is an increasing interest in extracting and generating definitions of words automatically. However, existing approaches, either extraction or generation, perform poorly on jargon. In this paper, we propose to combine extraction and generation for jargon definition modeling: first extract self- and correlative definitional information of target jargon from the Web and then generate the final definitions by incorporating the extracted definitional information. 
Our framework is remarkably simple but effective: experiments demonstrate our method can generate high-quality definitions for jargon and outperform state-of-the-art models significantly, e.g., BLEU score from 8.76 to 22.66 and human-annotated score from 2.34 to 4.04.", + "author": "Jie Huang; Hanyin Shao; Kevin Chen-Chuan Chang; Jinjun Xiong; Wen-mei Hwu", + "authorids": "/j/jie-huang/; /h/hanyin-shao/; /k/kevin-chen-chuan-chang/; /j/jinjun-xiong/; /w/wen-mei-hwu/", + "bibtex": "@inproceedings{huang-etal-2022-understanding,\n title = \"Understanding Jargon: Combining Extraction and Generation for Definition Modeling\",\n author = \"Huang, Jie and\n Shao, Hanyin and\n Chang, Kevin Chen-Chuan and\n Xiong, Jinjun and\n Hwu, Wen-mei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.266/\",\n doi = \"10.18653/v1/2022.emnlp-main.266\",\n pages = \"3994--4004\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.266.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.266/", + "pdf_size": 687506, + "gs_citation": 17, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7460807783616253064&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA; University of Illinois at Urbana-Champaign, USA; University at Buffalo, USA; University of Illinois at Urbana-Champaign, USA + NVIDIA, USA", + "aff_domain": "illinois.edu;illinois.edu;illinois.edu;buffalo.edu;illinois.edu", + "email": "illinois.edu;illinois.edu;illinois.edu;buffalo.edu;illinois.edu", + "github": "https://github.com/jeffhj/CDM", + "project": "", + "author_num": 5, + 
"aff_unique_index": "0;0;0;1;0+2", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;University at Buffalo;NVIDIA", + "aff_unique_dep": ";;", + "aff_unique_url": "https://illinois.edu;https://www.buffalo.edu;https://www.nvidia.com", + "aff_unique_abbr": "UIUC;UB;NV", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Urbana-Champaign;", + "aff_country_unique_index": "0;0;0;0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.626", + "title": "Understanding ME? Multimodal Evaluation for Fine-grained Visual Commonsense", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Visual commonsense understanding requires Vision Language (VL) models to not only understand image and text but also cross-reference in-between to fully integrate and achieve comprehension of the visual scene described. Recently, various approaches have been developed and have achieved high performance on visual commonsense benchmarks. However, it is unclear whether the models really understand the visual scene and underlying commonsense knowledge due to limited evaluation data resources. To provide an in-depth analysis, we present a Multimodal Evaluation (ME) pipeline to automatically generate question-answer pairs to test models\u2019 understanding of the visual scene, text, and related knowledge. We then take a step further to show that training with the ME data boosts the model\u2019s performance in standard VCR evaluation. 
Lastly, our in-depth analysis and comparison reveal interesting findings: (1) semantically low-level information can assist the learning of high-level information but not the opposite; (2) visual information is generally under utilization compared with text.", + "author": "Zhecan Wang; Haoxuan You; Yicheng He; Wenhao Li; Kai-Wei Chang; Shih-Fu Chang", + "authorids": "/z/zhecan-wang/; /h/haoxuan-you/; /y/yicheng-he/; /w/wenhao-li/; /k/kai-wei-chang/; /s/shih-fu-chang/", + "bibtex": "@inproceedings{wang-etal-2022-understanding-multimodal,\n title = \"Understanding {ME}? Multimodal Evaluation for Fine-grained Visual Commonsense\",\n author = \"Wang, Zhecan and\n You, Haoxuan and\n He, Yicheng and\n Li, Wenhao and\n Chang, Kai-Wei and\n Chang, Shih-Fu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.626/\",\n doi = \"10.18653/v1/2022.emnlp-main.626\",\n pages = \"9212--9224\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.626.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.626/", + "pdf_size": 2517526, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=15985258494176770030&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Columbia University; Columbia University; Columbia University; Columbia University; University of California, Los Angeles; Columbia University", + "aff_domain": "columbia.edu;columbia.edu;columbia.edu;columbia.edu;cs.ucla.edu; ", + "email": "columbia.edu;columbia.edu;columbia.edu;columbia.edu;cs.ucla.edu; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;0", + "aff_unique_norm": "Columbia University;University of California, 
Los Angeles", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.columbia.edu;https://www.ucla.edu", + "aff_unique_abbr": "Columbia;UCLA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Los Angeles", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.182", + "title": "Understanding Social Media Cross-Modality Discourse in Linguistic Space", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The multimedia communications with texts and images are popular on social media. However, limited studies concern how images are structured with texts to form coherent meanings in human cognition. To fill in the gap, we present a novel concept of cross-modality discourse, reflecting how human readers couple image and text understandings. Text descriptions are first derived from images (named as subtitles) in the multimedia contexts. Five labels \u2013 entity-level insertion, projection and concretization and scene-level restatement and extension \u2014 are further employed to shape the structure of subtitles and texts and present their joint meanings. As a pilot study, we also build the very first dataset containing over 16K multimedia tweets with manually annotated discourse labels. 
The experimental results show that trendy multimedia encoders based on multi-head attention (with captions) are unable to well understand cross-modality discourse and additionally modeling texts at the output layer helps yield the-state-of-the-art results.", + "author": "Chunpu Xu; Hanzhuo Tan; Jing Li; Piji Li", + "authorids": "/c/chunpu-xu/; /h/hanzhuo-tan/; /j/jing-li/; /p/piji-li/", + "bibtex": "@inproceedings{xu-etal-2022-understanding,\n title = \"Understanding Social Media Cross-Modality Discourse in Linguistic Space\",\n author = \"Xu, Chunpu and\n Tan, Hanzhuo and\n Li, Jing and\n Li, Piji\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.182/\",\n doi = \"10.18653/v1/2022.findings-emnlp.182\",\n pages = \"2459--2471\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.182.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.182/", + "pdf_size": 3010004, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8240005755605308234&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 7, + "aff": "Department of Computing, The Hong Kong Polytechnic University, China; Department of Computing, The Hong Kong Polytechnic University, China; Department of Computing, The Hong Kong Polytechnic University, China; College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, China", + "aff_domain": "connect.polyu.hk;connect.polyu.hk;polyu.edu.hk;nuaa.edu.cn", + "email": "connect.polyu.hk;connect.polyu.hk;polyu.edu.hk;nuaa.edu.cn", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "The Hong Kong Polytechnic University;Nanjing 
University of Aeronautics and Astronautics", + "aff_unique_dep": "Department of Computing;College of Computer Science and Technology", + "aff_unique_url": "https://www.polyu.edu.hk;http://www.nuaa.edu.cn", + "aff_unique_abbr": "PolyU;NUAA", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Nanjing", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.450", + "title": "Understanding and Improving Knowledge Distillation for Quantization Aware Training of Large Transformer Encoders", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Knowledge distillation (KD) has been a ubiquitous method for model compression to strengthen the capability of a lightweight model with the transferred knowledge from the teacher. In particular, KD has been employed in quantization-aware training (QAT) of Transformer encoders like BERT to improve the accuracy of the student model with the reduced-precision weight parameters. However, little is understood about which of the various KD approaches best fits the QAT of Transformers. In this work, we provide an in-depth analysis of the mechanism of KD on attention recovery of quantized large Transformers. In particular, we reveal that the previously adopted MSE loss on the attention score is insufficient for recovering the self-attention information. Therefore, we propose two KD methods; attention-map and attention-output losses. Furthermore, we explore the unification of both losses to address task-dependent preference between attention-map and output losses. 
The experimental results on various Transformer encoder models demonstrate that the proposed KD methods achieve state-of-the-art accuracy for QAT with sub-2-bit weight quantization.", + "author": "Minsoo Kim; Sihwa Lee; Suk-Jin Hong; Du-Seong Chang; Jungwook Choi", + "authorids": "/m/minsoo-kim/; /s/sihwa-lee/; /s/suk-jin-hong/; /d/du-seong-chang/; /j/jungwook-choi/", + "bibtex": "@inproceedings{kim-etal-2022-understanding,\n title = \"Understanding and Improving Knowledge Distillation for Quantization Aware Training of Large Transformer Encoders\",\n author = \"Kim, Minsoo and\n Lee, Sihwa and\n Hong, Suk-Jin and\n Chang, Du-Seong and\n Choi, Jungwook\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.450/\",\n doi = \"10.18653/v1/2022.emnlp-main.450\",\n pages = \"6713--6725\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.450.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.450/", + "pdf_size": 2637325, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16723615474376960205&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": ";;;;", + "aff_domain": ";;;;", + "email": ";;;;", + "github": "", + "project": "", + "author_num": 5 + }, + { + "id": "2022.emnlp-main.605", + "title": "Uni-Parser: Unified Semantic Parser for Question Answering on Knowledge Base and Database", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Parsing natural language questions into executable logical forms is a useful and interpretable way to perform question answering on structured data such as knowledge bases (KB) or databases (DB). 
However, existing approaches on semantic parsing cannot adapt to both modalities, as they suffer from the exponential growth of the logical form candidates and can hardly generalize to unseen data.In this work, we propose Uni-Parser, a unified semantic parser for question answering (QA) on both KB and DB. We define the primitive (relation and entity in KB, and table name, column name and cell value in DB) as the essential element in our framework. The number of primitives grows only at a linear rate to the number of retrieved relations in KB and DB, preventing us from exponential logic form candidates. We leverage the generator to predict final logical forms by altering and composing top-ranked primitives with different operations (e.g. select, where, count). With sufficiently pruned search space by a contrastive primitive ranker, the generator is empowered to capture the composition of primitives enhancing its generalization ability. We achieve competitive results on multiple KB and DB QA benchmarks with more efficiency, especially in the compositional and zero-shot settings.", + "author": "Ye Liu; Semih Yavuz; Rui Meng; Dragomir Radev; Caiming Xiong; Yingbo Zhou", + "authorids": "/y/ye-liu/; /s/semih-yavuz/; /r/rui-meng/; /d/dragomir-radev/; /c/caiming-xiong/; /y/yingbo-zhou/", + "bibtex": "@inproceedings{liu-etal-2022-uni,\n title = \"Uni-Parser: Unified Semantic Parser for Question Answering on Knowledge Base and Database\",\n author = \"Liu, Ye and\n Yavuz, Semih and\n Meng, Rui and\n Radev, Dragomir and\n Xiong, Caiming and\n Zhou, Yingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.605/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.605\",\n pages = \"8858--8869\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.605.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.605/", + "pdf_size": 487537, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17629369161584296600&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Salesforce Research; Salesforce Research; Salesforce Research; Yale University; Salesforce Research; Salesforce Research", + "aff_domain": "salesforce.com;salesforce.com;salesforce.com;yale.edu;salesforce.com;salesforce.com", + "email": "salesforce.com;salesforce.com;salesforce.com;yale.edu;salesforce.com;salesforce.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;1;0;0", + "aff_unique_norm": "Salesforce;Yale University", + "aff_unique_dep": "Salesforce Research;", + "aff_unique_url": "https://research.salesforce.com;https://www.yale.edu", + "aff_unique_abbr": "Salesforce;Yale", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.218", + "title": "UniGeo: Unifying Geometry Logical Reasoning via Reformulating Mathematical Expression", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Geometry problem solving is a well-recognized testbed for evaluating the high-level multi-modal reasoning capability of deep models. In most existing works, two main geometry problems: calculation and proving, are usually treated as two specific tasks, hindering a deep model to unify its reasoning capability on multiple math tasks. However, in essence, these two tasks have similar problem representations and overlapped math knowledge which can improve the understanding and reasoning ability of a deep model on both two tasks. 
Therefore, we construct a large-scale Unified Geometry problem benchmark, UniGeo, which contains 4,998 calculation problems and 9,543 proving problems. Each proving problem is annotated with a multi-step proof with reasons and mathematical expressions. The proof can be easily reformulated as a proving sequence that shares the same formats with the annotated program sequence for calculation problems. Naturally, we also present a unified multi-task Geometric Transformer framework, Geoformer, to tackle calculation and proving problems simultaneously in the form of sequence generation, which finally shows the reasoning ability can be improved on both two tasks by unifying formulation. Furthermore, we propose a Mathematical Expression Pretraining (MEP) method that aims to predict the mathematical expressions in the problem solution, thus improving the Geoformer model. Experiments on the UniGeo demonstrate that our proposed Geoformer obtains state-of-the-art performance by outperforming task-specific model NGS with over 5.6% and 3.2% accuracies on calculation and proving problems, respectively.", + "author": "Jiaqi Chen; Tong Li; Jinghui Qin; Pan Lu; Liang Lin; Chongyu Chen; Xiaodan Liang", + "authorids": "/j/jiaqi-chen/; /t/tong-li/; /j/jinghui-qin/; /p/pan-lu/; /l/liang-lin/; /c/chongyu-chen/; /x/xiaodan-liang/", + "bibtex": "@inproceedings{chen-etal-2022-unigeo,\n title = \"{U}ni{G}eo: Unifying Geometry Logical Reasoning via Reformulating Mathematical Expression\",\n author = \"Chen, Jiaqi and\n Li, Tong and\n Qin, Jinghui and\n Lu, Pan and\n Lin, Liang and\n Chen, Chongyu and\n Liang, Xiaodan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.emnlp-main.218/\",\n doi = \"10.18653/v1/2022.emnlp-main.218\",\n pages = \"3313--3323\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.218.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.218/", + "pdf_size": 682555, + "gs_citation": 111, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1967300551192394433&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "Sun Yat-sen University+The University of Hong Kong; Shenzhen Campus of Sun Yat-sen University; Guangdong University of Technology; University of California, Los Angeles; Sun Yat-sen University; DarkMatter AI Research; Sun Yat-sen University+Shenzhen Campus of Sun Yat-sen University", + "aff_domain": ";;;;;;", + "email": ";;;;;;", + "github": "https://github.com/chen-judge/UniGeo", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0;2;3;0;4;0+0", + "aff_unique_norm": "Sun Yat-sen University;The University of Hong Kong;Guangdong University of Technology;University of California, Los Angeles;DarkMatter AI Research", + "aff_unique_dep": ";;;;AI Research", + "aff_unique_url": "http://www.sysu.edu.cn/;https://www.hku.hk;http://www.gdut.edu.cn;https://www.ucla.edu;", + "aff_unique_abbr": "SYSU;HKU;GDUT;UCLA;", + "aff_campus_unique_index": ";1;2;1", + "aff_campus_unique": ";Shenzhen;Los Angeles", + "aff_country_unique_index": "0+0;0;0;1;0;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.534", + "title": "UniMSE: Towards Unified Multimodal Sentiment Analysis and Emotion Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Multimodal sentiment analysis (MSA) and emotion recognition in conversation (ERC) are key research topics for computers to understand human behaviors. From a psychological perspective, emotions are the expression of affect or feelings during a short period, while sentiments are formed and held for a longer period. 
However, most existing works study sentiment and emotion separately and do not fully exploit the complementary knowledge behind the two. In this paper, we propose a multimodal sentiment knowledge-sharing framework (UniMSE) that unifies MSA and ERC tasks from features, labels, and models. We perform modality fusion at the syntactic and semantic levels and introduce contrastive learning between modalities and samples to better capture the difference and consistency between sentiments and emotions. Experiments on four public benchmark datasets, MOSI, MOSEI, MELD, and IEMOCAP, demonstrate the effectiveness of the proposed method and achieve consistent improvements compared with state-of-the-art methods.", + "author": "Guimin Hu; Ting-En Lin; Yi Zhao; Guangming Lu; Yuchuan Wu; Yongbin Li", + "authorids": "/g/guimin-hu/; /t/ting-en-lin/; /y/yi-zhao/; /g/guangming-lu/; /y/yuchuan-wu/; /y/yongbin-li/", + "bibtex": "@inproceedings{hu-etal-2022-unimse,\n title = \"{U}ni{MSE}: Towards Unified Multimodal Sentiment Analysis and Emotion Recognition\",\n author = \"Hu, Guimin and\n Lin, Ting-En and\n Zhao, Yi and\n Lu, Guangming and\n Wu, Yuchuan and\n Li, Yongbin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.534/\",\n doi = \"10.18653/v1/2022.emnlp-main.534\",\n pages = \"7837--7851\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.534.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.534/", + "pdf_size": 1953168, + "gs_citation": 194, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3640546593878334270&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "School of Computer Science and Technology, School of 
Science, Harbin Institute of Technology (Shenzhen), China\u2020; School of Computer Science and Technology, School of Science, Harbin Institute of Technology (Shenzhen), China\u2020; School of Computer Science and Technology, School of Science, Harbin Institute of Technology (Shenzhen), China\u2020\u2217; School of Computer Science and Technology, School of Science, Harbin Institute of Technology (Shenzhen), China\u2020; ; \u2217", + "aff_domain": "gmail.com; ;hit.edu.cn;hit.edu.cn; ;gmail.com", + "email": "gmail.com; ;hit.edu.cn;hit.edu.cn; ;gmail.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Harbin Institute of Technology;", + "aff_unique_dep": "School of Computer Science and Technology;", + "aff_unique_url": "http://en.hhit.edu.cn/;", + "aff_unique_abbr": "HIT;", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Shenzhen;", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "China;" + }, + { + "id": "2022.emnlp-main.494", + "title": "UniNL: Aligning Representation Learning with Scoring Function for OOD Detection via Unified Neighborhood Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Detecting out-of-domain (OOD) intents from user queries is essential for avoiding wrong operations in task-oriented dialogue systems. The key challenge is how to distinguish in-domain (IND) and OOD intents. Previous methods ignore the alignment between representation learning and scoring function, limiting the OOD detection performance. In this paper, we propose a unified neighborhood learning framework (UniNL) to detect OOD intents. Specifically, we design a KNCL objective for representation learning, and introduce a KNN-based scoring function for OOD detection. We aim to align representation learning with scoring function. 
Experiments and analysis on two benchmark datasets show the effectiveness of our method.", + "author": "Yutao Mou; Pei Wang; Keqing He; Yanan Wu; Jingang Wang; Wei Wu; Weiran Xu", + "authorids": "/y/yutao-mou/; /p/pei-wang/; /k/keqing-he/; /y/yanan-wu/; /j/jingang-wang/; /w/wei-wu/; /w/weiran-xu/", + "bibtex": "@inproceedings{mou-etal-2022-uninl,\n title = \"{U}ni{NL}: Aligning Representation Learning with Scoring Function for {OOD} Detection via Unified Neighborhood Learning\",\n author = \"Mou, Yutao and\n Wang, Pei and\n He, Keqing and\n Wu, Yanan and\n Wang, Jingang and\n Wu, Wei and\n Xu, Weiran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.494/\",\n doi = \"10.18653/v1/2022.emnlp-main.494\",\n pages = \"7317--7325\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.494.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.494/", + "pdf_size": 788751, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4600810899592149706&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Meituan; Beijing University of Posts and Telecommunications; Meituan; Meituan; Beijing University of Posts and Telecommunications", + "aff_domain": "bupt.edu.cn;bupt.edu.cn;meituan.com;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn", + "email": "bupt.edu.cn;bupt.edu.cn;meituan.com;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn", + "github": "https://github.com/Yupei-Wang/UniNL", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;1;0;1;1;0", + "aff_unique_norm": "Beijing University of Posts and 
Telecommunications;Meituan", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.meituan.com", + "aff_unique_abbr": "BUPT;Meituan", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.508", + "title": "UniRPG: Unified Discrete Reasoning over Table and Text as Program Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Question answering requiring discrete reasoning, e.g., arithmetic computing, comparison, and counting, over knowledge is a challenging task.In this paper, we propose UniRPG, a semantic-parsing-based approach advanced in interpretability and scalability, to perform Unified discrete Reasoning over heterogeneous knowledge resources, i.e., table and text, as Program Generation. Concretely, UniRPG consists of a neural programmer and a symbolic program executor,where a program is the composition of a set of pre-defined general atomic and higher-order operations and arguments extracted from table and text.First, the programmer parses a question into a program by generating operations and copying arguments, and then, the executor derives answers from table and text based on the program.To alleviate the costly program annotation issue, we design a distant supervision approach for programmer learning, where pseudo programs are automatically constructed without annotated derivations.Extensive experiments on the TAT-QA dataset show that UniRPG achieves tremendous improvements and enhances interpretability and scalability compared with previous state-of-the-art methods, even without derivation annotation.Moreover, it achieves promising performance on the textual dataset DROP without derivation annotation.", + "author": "Yongwei Zhou; Junwei Bao; Chaoqun Duan; Youzheng Wu; Xiaodong He; Tiejun Zhao", + "authorids": "/y/yongwei-zhou/; /j/junwei-bao/; /c/chaoqun-duan/; 
/y/youzheng-wu/; /x/xiaodong-he/; /t/tiejun-zhao/", + "bibtex": "@inproceedings{zhou-etal-2022-unirpg,\n title = \"{U}ni{RPG}: Unified Discrete Reasoning over Table and Text as Program Generation\",\n author = \"Zhou, Yongwei and\n Bao, Junwei and\n Duan, Chaoqun and\n Wu, Youzheng and\n He, Xiaodong and\n Zhao, Tiejun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.508/\",\n doi = \"10.18653/v1/2022.emnlp-main.508\",\n pages = \"7494--7507\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.508.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.508/", + "pdf_size": 1077553, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8664398629504407353&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Harbin Institute of Technology; JD AI Research; JD AI Research; JD AI Research; JD AI Research; Harbin Institute of Technology", + "aff_domain": "hit-mtlab.net;gmail.com; ; ; ;hit.edu.cn", + "email": "hit-mtlab.net;gmail.com; ; ; ;hit.edu.cn", + "github": "https://github.com/JD-AI-Research-NLP/UniRPG", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;0", + "aff_unique_norm": "Harbin Institute of Technology;JD AI Research", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.hit.edu.cn/;https://www.jd.com", + "aff_unique_abbr": "HIT;JD AI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Harbin;", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.477", + "title": "UniRel: Unified Representation and Interaction for Joint Relational Triple Extraction", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "Relational triple extraction is challenging for its difficulty in capturing rich correlations between entities and relations. Existing works suffer from 1) heterogeneous representations of entities and relations, and 2) heterogeneous modeling of entity-entity interactions and entity-relation interactions. Therefore, the rich correlations are not fully exploited by existing works. In this paper, we propose UniRel to address these challenges. Specifically, we unify the representations of entities and relations by jointly encoding them within a concatenated natural language sequence, and unify the modeling of interactions with a proposed Interaction Map, which is built upon the off-the-shelf self-attention mechanism within any Transformer block. With comprehensive experiments on two popular relational triple extraction datasets, we demonstrate that UniRel is more effective and computationally efficient. The source code is available at https://github.com/wtangdev/UniRel.", + "author": "Wei Tang; Benfeng Xu; Yuyue Zhao; Zhendong Mao; Yifeng Liu; Yong Liao; Haiyong Xie", + "authorids": "/w/wei-tang/; /b/benfeng-xu/; /y/yuyue-zhao/; /z/zhendong-mao/; /y/yifeng-liu/; /y/yong-liao/; /h/haiyong-xie/", + "bibtex": "@inproceedings{tang-etal-2022-unirel,\n title = \"{U}ni{R}el: Unified Representation and Interaction for Joint Relational Triple Extraction\",\n author = \"Tang, Wei and\n Xu, Benfeng and\n Zhao, Yuyue and\n Mao, Zhendong and\n Liu, Yifeng and\n Liao, Yong and\n Xie, Haiyong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.477/\",\n doi = \"10.18653/v1/2022.emnlp-main.477\",\n pages = \"7087--7099\"\n}", + "pdf": 
"https://aclanthology.org/2022.emnlp-main.477.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.477/", + "pdf_size": 784487, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16236330449051711840&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China; University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China; University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China; University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China; National Engineering Laboratory for Risk Perception and Prevention (RPP), Beijing, China; University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China; University of Science and Technology of China, Anhui, China+CCCD Key Lab of Ministry of Culture and Tourism, Anhui, China", + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;cetc.com.cn;ustc.edu.cn;ustc.edu.cn", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;cetc.com.cn;ustc.edu.cn;ustc.edu.cn", + "github": "https://github.com/wtangdev/UniRel", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;0+1;0+1", + "aff_unique_norm": "University of Science and Technology of China;CCCD Key Lab of Ministry of Culture and Tourism;National Engineering Laboratory for Risk Perception and Prevention", + "aff_unique_dep": ";;Risk Perception and Prevention", + "aff_unique_url": "http://www.ustc.edu.cn;;", + "aff_unique_abbr": "USTC;;RPP", + "aff_campus_unique_index": "0;0;0;0;2;0;0", + "aff_campus_unique": "Anhui;;Beijing", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;0;0+0;0+0", + "aff_country_unique": 
"China" + }, + { + "id": "2022.emnlp-main.39", + "title": "UnifiedSKG: Unifying and Multi-Tasking Structured Knowledge Grounding with Text-to-Text Language Models", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Structured knowledge grounding (SKG) leverages structured knowledge to complete user requests, such as semantic parsing over databases and question answering over knowledge bases. Since the inputs and outputs of SKG tasks are heterogeneous, they have been studied separately by different communities, which limits systematic and compatible research on SKG. In this paper, we overcome this limitation by proposing the UnifiedSKG framework, which unifies 21 SKG tasks into a text-to-text format, aiming to promote systematic SKG research, instead of being exclusive to a single task, domain, or dataset. We use UnifiedSKG to benchmark T5 with different sizes and show that T5, with simple modifications when necessary, achieves state-of-the-art performance on almost all of the 21 tasks. We further demonstrate that multi-task prefix-tuning improves the performance on most tasks, largely improving the overall performance. UnifiedSKG also facilitates the investigation of zero-shot and few-shot learning, and we show that T0, GPT-3, and Codex struggle in zero-shot and few-shot learning for SKG. We also use UnifiedSKG to conduct a series of controlled experiments on structured knowledge encoding variants across SKG tasks. UnifiedSKG is easily extensible to more tasks, and it is open-sourced at https://github.com/hkunlp/unifiedskg.", + "author": "Tianbao Xie; Chen Henry Wu; Peng Shi; Ruiqi Zhong; Torsten Scholak; Michihiro Yasunaga; Chien-Sheng Wu; Ming Zhong; Pengcheng Yin; Sida I. Wang; Victor Zhong; Bailin Wang; Chengzu Li; Connor Boyle; Ansong Ni; Ziyu Yao; Dragomir Radev; Caiming Xiong; Lingpeng Kong; Rui Zhang; Noah A. 
Smith; Luke Zettlemoyer; Tao Yu", + "authorids": "/t/tianbao-xie/; /c/chen-henry-wu/; /p/peng-shi/; /r/ruiqi-zhong/; /t/torsten-scholak/; /m/michihiro-yasunaga/; /c/chien-sheng-wu/; /m/ming-zhong/; /p/pengcheng-yin/; /s/sida-i-wang/; /v/victor-zhong/; /b/bailin-wang/; /c/chengzu-li/; /c/connor-boyle/; /a/ansong-ni/; /z/ziyu-yao/; /d/dragomir-radev/; /c/caiming-xiong/; /l/lingpeng-kong/; /r/rui-zhang/; /n/noah-a-smith/; /l/luke-zettlemoyer/; /t/tao-yu/", + "bibtex": "@inproceedings{xie-etal-2022-unifiedskg,\n title = \"{U}nified{SKG}: Unifying and Multi-Tasking Structured Knowledge Grounding with Text-to-Text Language Models\",\n author = \"Xie, Tianbao and\n Wu, Chen Henry and\n Shi, Peng and\n Zhong, Ruiqi and\n Scholak, Torsten and\n Yasunaga, Michihiro and\n Wu, Chien-Sheng and\n Zhong, Ming and\n Yin, Pengcheng and\n Wang, Sida I. and\n Zhong, Victor and\n Wang, Bailin and\n Li, Chengzu and\n Boyle, Connor and\n Ni, Ansong and\n Yao, Ziyu and\n Radev, Dragomir and\n Xiong, Caiming and\n Kong, Lingpeng and\n Zhang, Rui and\n Smith, Noah A. 
and\n Zettlemoyer, Luke and\n Yu, Tao\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.39/\",\n doi = \"10.18653/v1/2022.emnlp-main.39\",\n pages = \"602--631\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.39.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.39/", + "pdf_size": 1587523, + "gs_citation": 214, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6897687897712619632&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": ";;;;;;;;;;;;;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;;;;;;", + "github": "https://github.com/hkunlp/unifiedskg", + "project": "https://unifiedskg.com", + "author_num": 23 + }, + { + "id": "2022.emnlp-main.500", + "title": "Unifying Data Perspectivism and Personalization: An Application to Social Norms", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Instead of using a single ground truth for language processing tasks, several recent studies have examined how to represent and predict the labels of the set of annotators. However, often little or no information about annotators is known, or the set of annotators is small. In this work, we examine a corpus of social media posts about conflict from a set of 13k annotators and 210k judgements of social norms. We provide a novel experimental setup that applies personalization methods to the modeling of annotators and compare their effectiveness for predicting the perception of social norms. 
We further provide an analysis of performance across subsets of social situations that vary by the closeness of the relationship between parties in conflict, and assess where personalization helps the most.", + "author": "Joan Plepi; B\u00e9la Neuendorf; Lucie Flek; Charles Welch", + "authorids": "/j/joan-plepi/; /b/bela-neuendorf/; /l/lucie-flek/; /c/charles-welch/", + "bibtex": "@inproceedings{plepi-etal-2022-unifying,\n title = \"Unifying Data Perspectivism and Personalization: An Application to Social Norms\",\n author = \"Plepi, Joan and\n Neuendorf, B{\\'e}la and\n Flek, Lucie and\n Welch, Charles\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.500/\",\n doi = \"10.18653/v1/2022.emnlp-main.500\",\n pages = \"7391--7402\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.500.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.500/", + "pdf_size": 462413, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13520679759880368169&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI); Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI); Conversational AI and Social Analytics (CAISA) Lab, Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI); Conversational AI and Social Analytics (CAISA) Lab, 
Department of Mathematics and Computer Science, University of Marburg + The Hessian Center for Artificial Intelligence (Hessian.AI)", + "aff_domain": "uni-marburg.de;uni-marburg.de;uni-marburg.de;uni-marburg.de", + "email": "uni-marburg.de;uni-marburg.de;uni-marburg.de;uni-marburg.de", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;0+1;0+1;0+1", + "aff_unique_norm": "University of Marburg;Hessian Center for Artificial Intelligence", + "aff_unique_dep": "Department of Mathematics and Computer Science;Artificial Intelligence", + "aff_unique_url": "https://www.uni-marburg.de;https://hessian.ai", + "aff_unique_abbr": "UM;Hessian.AI", + "aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.458", + "title": "Unifying the Convergences in Multilingual Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Although all-in-one-model multilingual neural machine translation (MNMT) has achieved remarkable progress, the convergence inconsistency in the joint training is ignored, i.e., different language pairs reaching convergence in different epochs. This leads to the trained MNMT model over-fitting low-resource language translations while under-fitting high-resource ones. In this paper, we propose a novel training strategy named LSSD (LanguageSpecific Self-Distillation), which can alleviate the convergence inconsistency and help MNMT models achieve the best performance on each language pair simultaneously. Specifically, LSSD picks up language-specific best checkpoints for each language pair to teach the current model on the fly. Furthermore, we systematically explore three sample-level manipulations of knowledge transferring. 
Experimental results on three datasets show that LSSD obtains consistent improvements towards all language pairs and achieves the state-of-the-art.", + "author": "Yichong Huang; Xiaocheng Feng; Xinwei Geng; Bing Qin", + "authorids": "/y/yichong-huang/; /x/xiaocheng-feng/; /x/xinwei-geng/; /b/bing-qin/", + "bibtex": "@inproceedings{huang-etal-2022-unifying,\n title = \"Unifying the Convergences in Multilingual Neural Machine Translation\",\n author = \"Huang, Yichong and\n Feng, Xiaocheng and\n Geng, Xinwei and\n Qin, Bing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.458/\",\n doi = \"10.18653/v1/2022.emnlp-main.458\",\n pages = \"6822--6835\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.458.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.458/", + "pdf_size": 1474591, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16040670855322296365&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": ";;;", + "aff_domain": ";;;", + "email": ";;;", + "github": "https://github.com/OrangeInSouth/LSSD", + "project": "", + "author_num": 4 + }, + { + "id": "2022.emnlp-main.175", + "title": "Unobserved Local Structures Make Compositional Generalization Hard", + "track": "main", + "status": "Main", + "award": false, + "abstract": "While recent work has shown that sequence-to-sequence models struggle to generalize to new compositions (termed compositional generalization), little is known on what makes compositional generalization hard on a particular test instance. In this work, we investigate the factors that make generalization to certain test instances challenging. 
We first substantiate that some examples are more difficult than others by showing that different models consistently fail or succeed on the same test instances. Then, we propose a criterion for the difficulty of an example: a test instance is hard if it contains a local structure that was not observed at training time. We formulate a simple decision rule based on this criterion and empirically show it predicts instance-level generalization well across 5 different semantic parsing datasets, substantially better than alternative decision rules. Last, we show local structures can be leveraged for creating difficult adversarial compositional splits and also to improve compositional generalization under limited training budgets by strategically selecting examples for the training set.", + "author": "Ben Bogin; Shivanshu Gupta; Jonathan Berant", + "authorids": "/b/ben-bogin/; /s/shivanshu-gupta/; /j/jonathan-berant/", + "bibtex": "@inproceedings{bogin-etal-2022-unobserved,\n title = \"Unobserved Local Structures Make Compositional Generalization Hard\",\n author = \"Bogin, Ben and\n Gupta, Shivanshu and\n Berant, Jonathan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.175/\",\n doi = \"10.18653/v1/2022.emnlp-main.175\",\n pages = \"2731--2747\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.175.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.175/", + "pdf_size": 544887, + "gs_citation": 27, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7675721780203119881&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Tel-Aviv University; University of California Irvine; Tel-Aviv University", + 
"aff_domain": "cs.tau.ac.il;uci.edu;cs.tau.ac.il", + "email": "cs.tau.ac.il;uci.edu;cs.tau.ac.il", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Tel Aviv University;University of California, Irvine", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.tau.ac.il;https://www.uci.edu", + "aff_unique_abbr": "TAU;UCI", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Irvine", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Israel;United States" + }, + { + "id": "2022.emnlp-main.34", + "title": "Unsupervised Boundary-Aware Language Model Pretraining for Chinese Sequence Labeling", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Boundary information is critical for various Chinese language processing tasks, such as word segmentation, part-of-speech tagging, and named entity recognition. Previous studies usually resorted to the use of a high-quality external lexicon, where lexicon items can offer explicit boundary information. However, to ensure the quality of the lexicon, great human effort is always necessary, which has been generally ignored. In this work, we suggest unsupervised statistical boundary information instead, and propose an architecture to encode the information directly into pre-trained language models, resulting in Boundary-Aware BERT (BABERT). We apply BABERT for feature induction of Chinese sequence labeling tasks. Experimental results on ten benchmarks of Chinese sequence labeling demonstrate that BABERT can provide consistent improvements on all datasets. 
In addition, our method can complement previous supervised lexicon exploration, where further improvements can be achieved when integrated with external lexicon information.", + "author": "Peijie Jiang; Dingkun Long; Yanzhao Zhang; Pengjun Xie; Meishan Zhang; Min Zhang", + "authorids": "/p/peijie-jiang/; /d/dingkun-long/; /y/yanzhao-zhang/; /p/pengjun-xie/; /m/meishan-zhang/; /m/min-zhang/", + "bibtex": "@inproceedings{jiang-etal-2022-unsupervised,\n title = \"Unsupervised Boundary-Aware Language Model Pretraining for {C}hinese Sequence Labeling\",\n author = \"Jiang, Peijie and\n Long, Dingkun and\n Zhang, Yanzhao and\n Xie, Pengjun and\n Zhang, Meishan and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.34/\",\n doi = \"10.18653/v1/2022.emnlp-main.34\",\n pages = \"526--537\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.34.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.34/", + "pdf_size": 616710, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11363573454036310974&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 3, + "aff": "School of New Media and Communication, Tianjin University, China; School of New Media and Communication, Tianjin University, China; School of New Media and Communication, Tianjin University, China; School of New Media and Communication, Tianjin University, China; Institute of Computing and Intelligence, Harbin Institute of Technology (Shenzhen); Institute of Computing and Intelligence, Harbin Institute of Technology (Shenzhen)", + "aff_domain": "tju.edu.cn;gmail.com;gmail.com;gmail.com;hit.edu.cn;hit.edu.cn", + "email": 
"tju.edu.cn;gmail.com;gmail.com;gmail.com;hit.edu.cn;hit.edu.cn", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;1;1", + "aff_unique_norm": "Tianjin University;Harbin Institute of Technology", + "aff_unique_dep": "School of New Media and Communication;Institute of Computing and Intelligence", + "aff_unique_url": "http://www.tju.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": "Tianjin University;HIT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-industry.32", + "title": "Unsupervised Dense Retrieval for Scientific Articles", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "In this work, we build a dense retrieval based semantic search engine on scientific articles from Elsevier. The major challenge is that there is no labeled data for training and testing. We apply a state-of-the-art unsupervised dense retrieval model called Generative Pseudo Labeling that generates high-quality pseudo training labels. Furthermore, since the articles are unbalanced across different domains, we select passages from multiple domains to form balanced training data. For the evaluation, we create two test sets: one manually annotated and one automatically created from the meta-information of our data. We compare the semantic search engine with the currently deployed lexical search engine on the two test sets. 
The results of the experiment show that the semantic search engine trained with pseudo training labels can significantly improve search performance.", + "author": "Dan Li; Vikrant Yadav; Zubair Afzal; George Tsatsaronis", + "authorids": "/d/dan-li/; /v/vikrant-yadav/; /z/zubair-afzal/; /g/george-tsatsaronis/", + "bibtex": "@inproceedings{li-etal-2022-unsupervised-dense,\n title = \"Unsupervised Dense Retrieval for Scientific Articles\",\n author = \"Li, Dan and\n Yadav, Vikrant and\n Afzal, Zubair and\n Tsatsaronis, George\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.32/\",\n doi = \"10.18653/v1/2022.emnlp-industry.32\",\n pages = \"313--321\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.32.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.32/", + "pdf_size": 584255, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=18020833209954003290&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "Elsevier; Elsevier; Elsevier; Elsevier", + "aff_domain": "elsevier.com;elsevier.com;elsevier.com;elsevier.com", + "email": "elsevier.com;elsevier.com;elsevier.com;elsevier.com", + "github": "", + "project": "https://www.sciencedirect.com", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Elsevier", + "aff_unique_dep": "", + "aff_unique_url": "https://www.elsevier.com", + "aff_unique_abbr": "Elsevier", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "Netherlands" + }, + { + "id": "2022.findings-emnlp.434", + "title": "Unsupervised Domain Adaptation for Joint Information Extraction", + "track": 
"main", + "status": "finding", + "award": false, + "abstract": "Joint Information Extraction (JIE) aims to jointly solve multiple tasks in the Information Extraction pipeline (e.g., entity mention, event trigger, relation, and event argument extraction). Due to their ability to leverage task dependencies and avoid error propagation, JIE models have presented state-of-the-art performance for different IE tasks. However, an issue with current JIE methods is that they only focus on standard supervised learning setting where training and test data comes from the same domain. Cross-domain/domain adaptation learning with training and test data in different domains have not been explored for JIE, thus hindering the application of this technology to different domains in practice. To address this issue, our work introduces the first study to evaluate performance of JIE models in unsupervised domain adaptation setting. In addition, we present a novel method to induce domain-invariant representations for the tasks in JIE, called Domain Adaptation for Joint Information Extraction (DA4JIE). In DA4JIE, we propose an Instance-relational Domain Adaptation mechanism that seeks to align representations of task instances in JIE across domains through a generalized version of domain-adversarial learning approach. We further devise a Context-invariant Structure Learning technique to filter domain-specialized contextual information from induced representations to boost performance of JIE models in new domains. 
Extensive experiments and analyses demonstrate that DA4JIE can significantly improve out-of-domain performance for current state-of-the-art JIE systems for all IE tasks.", + "author": "Nghia Ngo; Bonan Min; Thien Nguyen", + "authorids": "/n/nghia-ngo/; /b/bonan-min/; /t/thien-nguyen/", + "bibtex": "@inproceedings{ngo-etal-2022-unsupervised,\n title = \"Unsupervised Domain Adaptation for Joint Information Extraction\",\n author = \"Ngo, Nghia and\n Min, Bonan and\n Nguyen, Thien\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.434/\",\n doi = \"10.18653/v1/2022.findings-emnlp.434\",\n pages = \"5894--5905\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.434.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.434/", + "pdf_size": 723186, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5824576085427214329&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 4, + "aff": "Department of Computer Science, University of Oregon, Eugene, OR, USA; Amazon AWS AI Labs + Raytheon BBN Technologies; Department of Computer Science, University of Oregon, Eugene, OR, USA", + "aff_domain": "uoregon.edu;amazon.com;cs.uoregon.edu", + "email": "uoregon.edu;amazon.com;cs.uoregon.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;0", + "aff_unique_norm": "University of Oregon;Amazon;Raytheon BBN Technologies", + "aff_unique_dep": "Department of Computer Science;AWS AI Labs;", + "aff_unique_url": "https://www.uoregon.edu;https://aws.amazon.com;https://www.raytheonbbn.com", + "aff_unique_abbr": "UO;Amazon;Raytheon BBN", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": 
"Eugene;", + "aff_country_unique_index": "0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.638", + "title": "Unsupervised Entity Linking with Guided Summarization and Multiple-Choice Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Entity linking, the task of linking potentially ambiguous mentions in texts to corresponding knowledge-base entities, is an important component for language understanding. We address two challenge in entity linking: how to leverage wider contexts surrounding a mention, and how to deal with limited training data. We propose a fully unsupervised model called SumMC that first generates a guided summary of the contexts conditioning on the mention, and then casts the task to a multiple-choice problem where the model chooses an entity from a list of candidates. In addition to evaluating our model on existing datasets that focus on named entities, we create a new dataset that links noun phrases from WikiHow to Wikidata. 
We show that our SumMC model achieves state-of-the-art unsupervised performance on our new dataset and on exiting datasets.", + "author": "Young Min Cho; Li Zhang; Chris Callison-Burch", + "authorids": "/y/young-min-cho/; /l/li-zhang-upenn/; /c/chris-callison-burch/", + "bibtex": "@inproceedings{cho-etal-2022-unsupervised,\n title = \"Unsupervised Entity Linking with Guided Summarization and Multiple-Choice Selection\",\n author = \"Cho, Young Min and\n Zhang, Li and\n Callison-Burch, Chris\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.638/\",\n doi = \"10.18653/v1/2022.emnlp-main.638\",\n pages = \"9394--9401\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.638.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.638/", + "pdf_size": 569169, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=10549194002956098478&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "University of Pennsylvania; University of Pennsylvania; University of Pennsylvania", + "aff_domain": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "email": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.415", + "title": "Unsupervised Learning of Hierarchical Conversation Structure", + "track": "main", + "status": 
"finding", + "award": false, + "abstract": "Human conversations can evolve in many different ways, creating challenges for automatic understanding and summarization. Goal-oriented conversations often have meaningful sub-dialogue structure, but it can be highly domain-dependent. This work introduces an unsupervised approach to learning hierarchical conversation structure, including turn and sub-dialogue segment labels, corresponding roughly to dialogue acts and sub-tasks, respectively. The decoded structure is shown to be useful in enhancing neural models of language for three conversation-level understanding tasks. Further, the learned finite-state sub-dialogue network is made interpretable through automatic summarization.", + "author": "Bo-Ru Lu; Yushi Hu; Hao Cheng; Noah A. Smith; Mari Ostendorf", + "authorids": "/b/bo-ru-lu/; /y/yushi-hu/; /h/hao-cheng/; /n/noah-a-smith/; /m/mari-ostendorf/", + "bibtex": "@inproceedings{lu-etal-2022-unsupervised,\n title = \"Unsupervised Learning of Hierarchical Conversation Structure\",\n author = \"Lu, Bo-Ru and\n Hu, Yushi and\n Cheng, Hao and\n Smith, Noah A. 
and\n Ostendorf, Mari\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.415/\",\n doi = \"10.18653/v1/2022.findings-emnlp.415\",\n pages = \"5657--5670\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.415.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.415/", + "pdf_size": 594880, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9212173201521125161&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "University of Washington\u2660; University of Washington\u2660; Microsoft Research\u2661; University of Washington\u2660\u2662; University of Washington\u2660", + "aff_domain": "washington.edu;washington.edu;microsoft.com;cs.washington.edu;washington.edu", + "email": "washington.edu;washington.edu;microsoft.com;cs.washington.edu;washington.edu", + "github": "https://github.com/boru-roylu/THETA", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;1;0;0", + "aff_unique_norm": "University of Washington;Microsoft Research", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.washington.edu;https://www.microsoft.com/en-us/research", + "aff_unique_abbr": "UW;MSR", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.366", + "title": "Unsupervised Multi-Granularity Summarization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Text summarization is a user-preference based task, i.e., for one document, users often have different priorities for the summary. 
As a key aspect of customization in summarization, granularity is used to measure the semantic coverage between the summary and source document. However, developing systems that can generate summaries with customizable semantic coverage is still an under-explored topic. In this paper, we propose the first unsupervised multi-granularity summarization framework, GranuSum. We take events as the basic semantic units of the source documents and propose to rank these events by their salience. We also develop a model to summarize input documents with given events as anchors and hints. By inputting different numbers of events, GranuSum is capable of producing multi-granular summaries in an unsupervised manner. Meanwhile, we annotate a new benchmark GranuDUC that contains multiple summaries at different granularities for each document cluster. Experimental results confirm the substantial superiority of GranuSum on multi-granularity summarization over strong baselines. Furthermore, by exploiting the event information, GranuSum also exhibits state-of-the-art performance under the conventional unsupervised abstractive setting.", + "author": "Ming Zhong; Yang Liu; Suyu Ge; Yuning Mao; Yizhu Jiao; Xingxing Zhang; Yichong Xu; Chenguang Zhu; Michael Zeng; Jiawei Han", + "authorids": "/m/ming-zhong/; /y/yang-liu/; /s/suyu-ge/; /y/yuning-mao/; /y/yizhu-jiao/; /x/xingxing-zhang/; /y/yichong-xu/; /c/chenguang-zhu/; /m/michael-zeng/; /j/jiawei-han/", + "bibtex": "@inproceedings{zhong-etal-2022-unsupervised,\n title = \"Unsupervised Multi-Granularity Summarization\",\n author = \"Zhong, Ming and\n Liu, Yang and\n Ge, Suyu and\n Mao, Yuning and\n Jiao, Yizhu and\n Zhang, Xingxing and\n Xu, Yichong and\n Zhu, Chenguang and\n Zeng, Michael and\n Han, Jiawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab 
Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.366/\",\n doi = \"10.18653/v1/2022.findings-emnlp.366\",\n pages = \"4980--4995\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.366.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.366/", + "pdf_size": 313896, + "gs_citation": 14, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9775970909553424927&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of Illinois at Urbana-Champaign; Microsoft Cognitive Services Research; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; University of Illinois at Urbana-Champaign; Microsoft Research Asia; Microsoft Cognitive Services Research; Microsoft Cognitive Services Research; Microsoft Cognitive Services Research; University of Illinois at Urbana-Champaign", + "aff_domain": "illinois.edu;microsoft.com;illinois.edu;illinois.edu;illinois.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com;illinois.edu", + "email": "illinois.edu;microsoft.com;illinois.edu;illinois.edu;illinois.edu;microsoft.com;microsoft.com;microsoft.com;microsoft.com;illinois.edu", + "github": "https://github.com/maszhongming/GranuDUC", + "project": "", + "author_num": 10, + "aff_unique_index": "0;1;0;0;0;2;1;1;1;0", + "aff_unique_norm": "University of Illinois at Urbana-Champaign;Microsoft;Microsoft Research", + "aff_unique_dep": ";Cognitive Services Research;Research", + "aff_unique_url": "https://illinois.edu;https://www.microsoft.com;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "UIUC;Microsoft;MSR Asia", + "aff_campus_unique_index": "0;0;0;0;2;0", + "aff_campus_unique": "Urbana-Champaign;;Asia", + "aff_country_unique_index": "0;0;0;0;0;1;0;0;0;0", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-main.685", + "title": "Unsupervised Non-transferable Text 
Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Training a good deep learning model requires substantial data and computing resources, which makes the resulting neural model a valuable intellectual property. To prevent the neural network from being undesirably exploited, non-transferable learning has been proposed to reduce the model generalization ability in specific target domains. However, existing approaches require labeled data for the target domain which can be difficult to obtain. Furthermore, they do not have the mechanism to still recover the model\u2019s ability to access the target domain.In this paper, we propose a novel unsupervised non-transferable learning method for the text classification task that does not require annotated target domain data. We further introduce a secret key component in our approach for recovering the access to the target domain, where we design both an explicit and an implicit method for doing so. Extensive experiments demonstrate the effectiveness of our approach.", + "author": "Guangtao Zeng; Wei Lu", + "authorids": "/g/guangtao-zeng/; /w/wei-lu/", + "bibtex": "@inproceedings{zeng-lu-2022-unsupervised,\n title = \"Unsupervised Non-transferable Text Classification\",\n author = \"Zeng, Guangtao and\n Lu, Wei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.685/\",\n doi = \"10.18653/v1/2022.emnlp-main.685\",\n pages = \"10071--10084\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.685.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.685/", + "pdf_size": 1157389, + "gs_citation": 6, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=7817012853829068359&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 3, + "aff": "StatNLP Research Group; StatNLP Research Group", + "aff_domain": "mymail.sutd.edu.sg;sutd.edu.sg", + "email": "mymail.sutd.edu.sg;sutd.edu.sg", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "StatNLP Research Group", + "aff_unique_dep": "Research Group", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.589", + "title": "Unsupervised Opinion Summarisation in the Wasserstein Space", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Opinion summarisation synthesises opinions expressed in a group of documents discussingthe same topic to produce a single summary. Recent work has looked at opinion summarisation of clusters of social media posts. Such posts are noisy and have unpredictable structure, posing additional challenges for the construction of the summary distribution and the preservation of meaning compared to online reviews, which has been so far the focus on opinion summarisation. To address these challenges we present WassOS, an unsupervised abstractive summarization model which makesuse of the Wasserstein distance. A Variational Autoencoder is first used to obtain the distribution of documents/posts, and the summary distribution is obtained as the Wasserstein barycenter. We create separate disentangled latent semantic and syntactic representations of the summary, which are fed into a GRU decoder with a transformer layer to produce the final summary. 
Our experiments onmultiple datasets including reviews, Twitter clusters and Reddit threads show that WassOSalmost always outperforms the state-of-the-art on ROUGE metrics and consistently producesthe best summaries with respect to meaning preservation according to human evaluations.", + "author": "Jiayu Song; Iman Munire Bilal; Adam Tsakalidis; Rob Procter; Maria Liakata", + "authorids": "/j/jiayu-song/; /i/iman-munire-bilal/; /a/adam-tsakalidis/; /r/rob-procter/; /m/maria-liakata/", + "bibtex": "@inproceedings{song-etal-2022-unsupervised,\n title = \"Unsupervised Opinion Summarisation in the {W}asserstein Space\",\n author = \"Song, Jiayu and\n Bilal, Iman Munire and\n Tsakalidis, Adam and\n Procter, Rob and\n Liakata, Maria\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.589/\",\n doi = \"10.18653/v1/2022.emnlp-main.589\",\n pages = \"8592--8607\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.589.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.589/", + "pdf_size": 483413, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12666269513177665719&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Queen Mary University of London, London, UK; University of Warwick, Coventry, UK + The Alan Turing Institute, London, UK; Queen Mary University of London, London, UK + The Alan Turing Institute, London, UK; University of Warwick, Coventry, UK + The Alan Turing Institute, London, UK; Queen Mary University of London, London, UK + University of Warwick, Coventry, UK + The Alan Turing Institute, London, UK", + "aff_domain": "qmul.ac.uk; ;qmul.ac.uk; ;qmul.ac.uk", + "email": "qmul.ac.uk; 
;qmul.ac.uk; ;qmul.ac.uk", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1+2;0+2;1+2;0+1+2", + "aff_unique_norm": "Queen Mary University of London;University of Warwick;The Alan Turing Institute", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.qmul.ac.uk;https://www.warwick.ac.uk;https://www.turing.ac.uk", + "aff_unique_abbr": "QMUL;Warwick;ATI", + "aff_campus_unique_index": "0;1+0;0+0;1+0;0+1+0", + "aff_campus_unique": "London;Coventry", + "aff_country_unique_index": "0;0+0;0+0;0+0;0+0+0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.findings-emnlp.111", + "title": "Unsupervised Syntactically Controlled Paraphrase Generation with Abstract Meaning Representations", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Syntactically controlled paraphrase generation has become an emerging research direction in recent years. Most existing approaches require annotated paraphrase pairs for training and are thus costly to extend to new domains. Unsupervised approaches, on the other hand, do not need paraphrase pairs but suffer from relatively poor performance in terms of syntactic control and quality of generated paraphrases. In this paper, we demonstrate that leveraging Abstract Meaning Representations (AMR) can greatly improve the performance of unsupervised syntactically controlled paraphrase generation.Our proposed model, AMR-enhanced Paraphrase Generator (AMRPG), separately encodes the AMR graph and the constituency parse of the input sentence into two disentangled semantic and syntactic embeddings. A decoder is then learned to reconstruct the input sentence from the semantic and syntactic embeddings. Our experiments show that AMRPG generates more accurate syntactically controlled paraphrases, both quantitatively and qualitatively, compared to the existing unsupervised approaches. 
We also demonstrate that the paraphrases generated by AMRPG can be used for data augmentation to improve the robustness of NLP models.", + "author": "Kuan-Hao Huang; Varun Iyer; Anoop Kumar; Sriram Venkatapathy; Kai-Wei Chang; Aram Galstyan", + "authorids": "/k/kuan-hao-huang/; /v/varun-iyer/; /a/anoop-kumar/; /s/sriram-venkatapathy/; /k/kai-wei-chang/; /a/aram-galstyan/", + "bibtex": "@inproceedings{huang-etal-2022-unsupervised-syntactically,\n title = \"Unsupervised Syntactically Controlled Paraphrase Generation with {A}bstract {M}eaning {R}epresentations\",\n author = \"Huang, Kuan-Hao and\n Iyer, Varun and\n Kumar, Anoop and\n Venkatapathy, Sriram and\n Chang, Kai-Wei and\n Galstyan, Aram\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.111/\",\n doi = \"10.18653/v1/2022.findings-emnlp.111\",\n pages = \"1547--1554\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.111.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.111/", + "pdf_size": 377535, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7245970952233439343&as_sdt=4005&sciodt=0,6&hl=en", + "gs_version_total": 5, + "aff": "University of California, Los Angeles + Amazon Alexa AI; Johns Hopkins University + Amazon Alexa AI; Amazon Alexa AI; Amazon Alexa AI; University of California, Los Angeles + Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "cs.ucla.edu;jhu.edu;amazon.com;amazon.com;cs.ucla.edu;amazon.com", + "email": "cs.ucla.edu;jhu.edu;amazon.com;amazon.com;cs.ucla.edu;amazon.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2+1;1;1;0+1;1", + "aff_unique_norm": "University of California, Los 
Angeles;Amazon;Johns Hopkins University", + "aff_unique_dep": ";Alexa AI;", + "aff_unique_url": "https://www.ucla.edu;https://www.amazon.com;https://www.jhu.edu", + "aff_unique_abbr": "UCLA;Amazon;JHU", + "aff_campus_unique_index": "0;;0", + "aff_campus_unique": "Los Angeles;", + "aff_country_unique_index": "0+0;0+0;0;0;0+0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-industry.1", + "title": "Unsupervised Term Extraction for Highly Technical Domains", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "Term extraction is an information extraction task at the root of knowledge discovery platforms. Developing term extractors that are able to generalize across very diverse and potentially highly technical domains is challenging, as annotations for domains requiring in-depth expertise are scarce and expensive to obtain. In this paper, we describe the term extraction subsystem of a commercial knowledge discovery platform that targets highly technical fields such as pharma, medical, and material science. To be able to generalize across domains, we introduce a fully unsupervised annotator (UA). It extracts terms by combining novel morphological signals from sub-word tokenization with term-to-topic and intra-term similarity metrics, computed using general-domain pre-trained sentence-encoders. The annotator is used to implement a weakly-supervised setup, where transformer-models are fine-tuned (or pre-trained) over the training data generated by running the UA over large unlabeled corpora. Our experiments demonstrate that our setup can improve the predictive performance while decreasing the inference latency on both CPUs and GPUs. 
Our annotators provide a very competitive baseline for all the cases where annotations are not available.", + "author": "Francesco Fusco; Peter Staar; Diego Antognini", + "authorids": "/f/francesco-fusco/; /p/peter-staar/; /d/diego-antognini/", + "bibtex": "@inproceedings{fusco-etal-2022-unsupervised,\n title = \"Unsupervised Term Extraction for Highly Technical Domains\",\n author = \"Fusco, Francesco and\n Staar, Peter and\n Antognini, Diego\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.1/\",\n doi = \"10.18653/v1/2022.emnlp-industry.1\",\n pages = \"1--8\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.1.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.1/", + "pdf_size": 469349, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6754427718700411476&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 9, + "aff": "IBM Research; IBM Research; IBM Research", + "aff_domain": "zurich.ibm.com;zurich.ibm.com;ibm.com", + "email": "zurich.ibm.com;zurich.ibm.com;ibm.com", + "github": "", + "project": "https://ds4sd.github.io", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.352", + "title": "Unsupervised Text Deidentification", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Deidentification seeks to anonymize textual data prior to distribution. 
Automatic deidentification primarily uses supervised named entity recognition from human-labeled data points. We propose an unsupervised deidentification method that masks words that leak personally-identifying information. The approach utilizes a specially trained reidentification model to identify individuals from redacted personal documents. Motivated by K-anonymity based privacy, we generate redactions that ensure a minimum reidentification rank for the correct profile of the document. To evaluate this approach, we consider the task of deidentifying Wikipedia Biographies, and evaluate using an adversarial reidentification metric. Compared to a set of unsupervised baselines, our approach deidentifies documents more completely while removing fewer words. Qualitatively, we see that the approach eliminates many identifying aspects that would fall outside of the common named entity based approach.", + "author": "John Morris; Justin Chiu; Ramin Zabih; Alexander Rush", + "authorids": "/j/john-morris/; /j/justin-chiu/; /r/ramin-zabih/; /a/alexander-m-rush/", + "bibtex": "@inproceedings{morris-etal-2022-unsupervised,\n title = \"Unsupervised Text Deidentification\",\n author = \"Morris, John and\n Chiu, Justin and\n Zabih, Ramin and\n Rush, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.352/\",\n doi = \"10.18653/v1/2022.findings-emnlp.352\",\n pages = \"4777--4788\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.352.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.352/", + "pdf_size": 677781, + "gs_citation": 11, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=5902562564495355938&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Cornell University; Cornell University; Cornell University; Cornell University", + "aff_domain": "cornell.edu; ; ;cornell.edu", + "email": "cornell.edu; ; ;cornell.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Cornell University", + "aff_unique_dep": "", + "aff_unique_url": "https://www.cornell.edu", + "aff_unique_abbr": "Cornell", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.239", + "title": "Unsupervised Tokenization Learning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In the presented study, we discover that the so-called \u201ctransition freedom\u201d metric appears superior for unsupervised tokenization purposes in comparison to statistical metrics such as mutual information and conditional probability, providing F-measure scores in range from 0.71 to 1.0 across explored multilingual corpora. We find that different languages require different offshoots of that metric (such as derivative, variance, and \u201cpeak values\u201d) for successful tokenization. Larger training corpora do not necessarily result in better tokenization quality, while compressing the models by eliminating statistically weak evidence tends to improve performance. 
The proposed unsupervised tokenization technique provides quality better than or comparable to lexicon-based ones, depending on the language.", + "author": "Anton Kolonin; Vignav Ramesh", + "authorids": "/a/anton-kolonin/; /v/vignav-ramesh/", + "bibtex": "@inproceedings{kolonin-ramesh-2022-unsupervised,\n title = \"Unsupervised Tokenization Learning\",\n author = \"Kolonin, Anton and\n Ramesh, Vignav\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.239/\",\n doi = \"10.18653/v1/2022.emnlp-main.239\",\n pages = \"3649--3664\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.239.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.239/", + "pdf_size": 5798100, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11736471289157170255&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": ";", + "aff_domain": ";", + "email": ";", + "github": "", + "project": "", + "author_num": 2 + }, + { + "id": "2022.emnlp-industry.15", + "title": "Unsupervised training data re-weighting for natural language understanding with local distribution approximation", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "One of the major challenges of training Natural Language Understanding (NLU) production models lies in the discrepancy between the distributions of the offline training data and of the online live data, due to, e.g., biased sampling scheme, cyclic seasonality shifts, annotated training data coming from a variety of different sources, and a changing pool of users. Consequently, the model trained by the offline data is biased. 
We often observe this problem especially in task-oriented conversational systems, where topics of interest and the characteristics of users using the system change over time. In this paper we propose an unsupervised approach to mitigate the offline training data sampling bias in multiple NLU tasks. We show that a local distribution approximation in the pre-trained embedding space enables the estimation of importance weights for training samples guiding re-sampling for an effective bias mitigation. We illustrate our novel approach using multiple NLU datasets and show improvements obtained without additional annotation, making this a general approach for mitigating effects of sampling bias.", + "author": "Jose Garrido Ramas; Dieu-thu Le; Bei Chen; Manoj Kumar; Kay Rottmann", + "authorids": "/j/jose-garrido-ramas/; /d/dieu-thu-le/; /b/bei-chen/; /m/manoj-kumar/; /k/kay-rottmann/", + "bibtex": "@inproceedings{garrido-ramas-etal-2022-unsupervised,\n title = \"Unsupervised training data re-weighting for natural language understanding with local distribution approximation\",\n author = \"Garrido Ramas, Jose and\n Le, Dieu-thu and\n Chen, Bei and\n Kumar, Manoj and\n Rottmann, Kay\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.15/\",\n doi = \"10.18653/v1/2022.emnlp-industry.15\",\n pages = \"154--160\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.15.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.15/", + "pdf_size": 515860, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12825224859701887966&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "Amazon Alexa AI; Amazon Alexa AI; Amazon 
Alexa AI; Amazon Alexa AI; Amazon Alexa AI", + "aff_domain": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "email": "amazon.com;amazon.com;amazon.com;amazon.com;amazon.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Amazon", + "aff_unique_dep": "Alexa AI", + "aff_unique_url": "https://www.amazon.com", + "aff_unique_abbr": "Amazon", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.79", + "title": "Using Commonsense Knowledge to Answer Why-Questions", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Answering questions in narratives about why events happened often requires commonsense knowledge external to the text. What aspects of this knowledge are available in large language models? What aspects can be made accessible via external commonsense resources? We study these questions in the context of answering questions in the TellMeWhy dataset using COMET as a source of relevant commonsense relations. We analyze the effects of model size (T5 and GPT3) along with methods of injecting knowledge (COMET) into these models. Results show that the largest models, as expected, yield substantial improvements over base models. Injecting external knowledge helps models of various sizes, but the amount of improvement decreases with larger model size. We also find that the format in which knowledge is provided is critical, and that smaller models benefit more from larger amounts of knowledge. 
Finally, we develop an ontology of knowledge types and analyze the relative coverage of the models across these categories.", + "author": "Yash Kumar Lal; Niket Tandon; Tanvi Aggarwal; Horace Liu; Nathanael Chambers; Raymond Mooney; Niranjan Balasubramanian", + "authorids": "/y/yash-kumar-lal/; /n/niket-tandon/; /t/tanvi-aggarwal/; /h/horace-liu/; /n/nathanael-chambers/; /r/raymond-mooney/; /n/niranjan-balasubramanian/", + "bibtex": "@inproceedings{lal-etal-2022-using,\n title = \"Using Commonsense Knowledge to Answer Why-Questions\",\n author = \"Lal, Yash Kumar and\n Tandon, Niket and\n Aggarwal, Tanvi and\n Liu, Horace and\n Chambers, Nathanael and\n Mooney, Raymond and\n Balasubramanian, Niranjan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.79/\",\n doi = \"10.18653/v1/2022.emnlp-main.79\",\n pages = \"1204--1219\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.79.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.79/", + "pdf_size": 410120, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8488501117182257644&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Stony Brook University; Allen Institute for AI; Stony Brook University; Stony Brook University; US Naval Academy; University of Texas at Austin; Stony Brook University", + "aff_domain": "cs.stonybrook.edu; ; ; ; ; ; ", + "email": "cs.stonybrook.edu; ; ; ; ; ; ", + "github": "https://github.com/StonyBrookNLP/knowwhy", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;2;3;0", + "aff_unique_norm": "Stony Brook University;Allen Institute for AI;United States Naval Academy;University of Texas at 
Austin", + "aff_unique_dep": ";;;", + "aff_unique_url": "https://www.stonybrook.edu;https://allenai.org;https://www.usna.edu;https://www.utexas.edu", + "aff_unique_abbr": "SBU;AI2;USNA;UT Austin", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Austin", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.169", + "title": "Using Developer Discussions to Guide Fixing Bugs in Software", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Automatically fixing software bugs is a challenging task. While recent work showed that natural language context is useful in guiding bug-fixing models, the approach required prompting developers to provide this context, which was simulated through commit messages written after the bug-fixing code changes were made. We instead propose using bug report discussions, which are available before the task is performed and are also naturally occurring, avoiding the need for any additional information from developers. For this, we augment standard bug-fixing datasets with bug report discussions. 
Using these newly compiled datasets, we demonstrate that various forms of natural language context derived from such discussions can aid bug-fixing, even leading to improved performance over using commit messages corresponding to the oracle bug-fixing commits.", + "author": "Sheena Panthaplackel; Milos Gligoric; Junyi Jessy Li; Raymond Mooney", + "authorids": "/s/sheena-panthaplackel/; /m/milos-gligoric/; /j/junyi-jessy-li/; /r/raymond-mooney/", + "bibtex": "@inproceedings{panthaplackel-etal-2022-using,\n title = \"Using Developer Discussions to Guide Fixing Bugs in Software\",\n author = \"Panthaplackel, Sheena and\n Gligoric, Milos and\n Li, Junyi Jessy and\n Mooney, Raymond\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.169/\",\n doi = \"10.18653/v1/2022.findings-emnlp.169\",\n pages = \"2292--2301\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.169.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.169/", + "pdf_size": 406095, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7163019193619488412&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science; Department of Electrical and Computer Engineering; Department of Linguistics; Department of Computer Science", + "aff_domain": "cs.utexas.edu;utexas.edu;austin.utexas.edu;cs.utexas.edu", + "email": "cs.utexas.edu;utexas.edu;austin.utexas.edu;cs.utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;2;0", + "aff_unique_norm": "Unknown Institution;;University Affiliation Not Specified", + "aff_unique_dep": "Department of Computer Science;Department of Electrical and 
Computer Engineering;Department of Linguistics", + "aff_unique_url": ";;", + "aff_unique_abbr": ";;", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.findings-emnlp.12", + "title": "Utilizing Language-Image Pretraining for Efficient and Robust Bilingual Word Alignment", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word translation without parallel corpora has become feasible, rivaling the performance of supervised methods. Recent findings have shown the improvement in accuracy and robustness of unsupervised word translation (UWT) by utilizing visual observations, which are universal representations across languages.Our work investigates the potential of using not only visual observations but also pretrained language-image models for enabling a more efficient and robust UWT. We develop a novel UWT method dubbed Word Alignment using Language-Image Pretraining (WALIP), leveraging visual observations via the shared image-text embedding space of CLIPs (Radford et al., 2021). WALIP has a two-step procedure. 
First, we retrieve word pairs with high confidences of similarity, computed using our proposed image-based fingerprints, which define the initial pivot for the alignment.Second, we apply our robust Procrustes algorithm to estimate the linear mapping between two embedding spaces, which iteratively corrects and refines the estimated alignment.Our extensive experiments show that WALIP improves upon the state-of-the-art performance of bilingual word alignment for a few language pairs across different word embeddings and displays great robustness to the dissimilarity of language pairs or training corpora for two word embeddings.", + "author": "Tuan Dinh; Jy-yong Sohn; Shashank Rajput; Timothy Ossowski; Yifei Ming; Junjie Hu; Dimitris Papailiopoulos; Kangwook Lee", + "authorids": "/t/tuan-dinh/; /j/jy-yong-sohn/; /s/shashank-rajput/; /t/timothy-ossowski/; /y/yifei-ming/; /j/junjie-hu/; /d/dimitris-papailiopoulos/; /k/kangwook-lee/", + "bibtex": "@inproceedings{dinh-etal-2022-utilizing,\n title = \"Utilizing Language-Image Pretraining for Efficient and Robust Bilingual Word Alignment\",\n author = \"Dinh, Tuan and\n Sohn, Jy-yong and\n Rajput, Shashank and\n Ossowski, Timothy and\n Ming, Yifei and\n Hu, Junjie and\n Papailiopoulos, Dimitris and\n Lee, Kangwook\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.12/\",\n doi = \"10.18653/v1/2022.findings-emnlp.12\",\n pages = \"154--168\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.12.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.12/", + "pdf_size": 2503867, + "gs_citation": 1, + "gs_cited_by_link": 
"https://scholar.google.com/scholar?cites=1765867898851794432&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA; University of Wisconsin, Madison, WI, USA", + "aff_domain": "wisc.edu; ; ; ; ; ; ; ", + "email": "wisc.edu; ; ; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;0;0;0", + "aff_unique_norm": "University of Wisconsin-Madison", + "aff_unique_dep": "", + "aff_unique_url": "https://www.wisc.edu", + "aff_unique_abbr": "UW-Madison", + "aff_campus_unique_index": "0;0;0;0;0;0;0;0", + "aff_campus_unique": "Madison", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.59", + "title": "VIRT: Improving Representation-based Text Matching via Virtual Interaction", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Text matching is a fundamental research problem in natural language understanding. Interaction-based approaches treat the text pair as a single sequence and encode it through cross encoders, while representation-based models encode the text pair independently with siamese or dual encoders. Interaction-based models require dense computations and thus are impractical in real-world applications. Representation-based models have become the mainstream paradigm for efficient text matching. However, these models suffer from severe performance degradation due to the lack of interactions between the pair of texts. To remedy this, we propose a Virtual InteRacTion mechanism (VIRT) for improving representation-based text matching while maintaining its efficiency. 
In particular, we introduce an interactive knowledge distillation module that is only applied during training. It enables deep interaction between texts by effectively transferring knowledge from the interaction-based model. A light interaction strategy is designed to fully leverage the learned interactive knowledge. Experimental results on six text matching benchmarks demonstrate the superior performance of our method over several state-of-the-art representation-based models. We further show that VIRT can be integrated into existing methods as plugins to lift their performances.", + "author": "Dan Li; Yang Yang; Hongyin Tang; Jiahao Liu; Qifan Wang; Jingang Wang; Tong Xu; Wei Wu; Enhong Chen", + "authorids": "/d/dan-li/; /y/yang-yang/; /h/hongyin-tang/; /j/jiahao-liu/; /q/qifan-wang/; /j/jingang-wang/; /t/tong-xu/; /w/wei-wu/; /e/enhong-chen/", + "bibtex": "@inproceedings{li-etal-2022-virt,\n title = \"{VIRT}: Improving Representation-based Text Matching via Virtual Interaction\",\n author = \"Li, Dan and\n Yang, Yang and\n Tang, Hongyin and\n Liu, Jiahao and\n Wang, Qifan and\n Wang, Jingang and\n Xu, Tong and\n Wu, Wei and\n Chen, Enhong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.59/\",\n doi = \"10.18653/v1/2022.emnlp-main.59\",\n pages = \"914--925\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.59.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.59/", + "pdf_size": 929549, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=567169094431430749&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 0, + "aff": "University of Science and Technology of China; Meituan; Meituan; Meituan; 
MetaAI; Meituan; University of Science and Technology of China; Meituan; University of Science and Technology of China", + "aff_domain": "mail.ustc.edu.cn;meituan.com;meituan.com;meituan.com;fb.com;meituan.com;mail.ustc.edu.cn;gmail.com;mail.ustc.edu.cn", + "email": "mail.ustc.edu.cn;meituan.com;meituan.com;meituan.com;fb.com;meituan.com;mail.ustc.edu.cn;gmail.com;mail.ustc.edu.cn", + "github": "", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;1;1;2;1;0;1;0", + "aff_unique_norm": "University of Science and Technology of China;Meituan;MetaAI", + "aff_unique_dep": ";;", + "aff_unique_url": "http://www.ustc.edu.cn;https://www.meituan.com;https://meta.ai", + "aff_unique_abbr": "USTC;Meituan;MetaAI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;1;0;0;0;0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.447", + "title": "Validity Assessment of Legal Will Statements as Natural Language Inference", + "track": "main", + "status": "finding", + "award": false, + "abstract": "This work introduces a natural language inference (NLI) dataset that focuses on the validity of statements in legal wills. This dataset is unique because: (a) each entailment decision requires three inputs: the statement from the will, the law, and the conditions that hold at the time of the testator\u2019s death; and (b) the included texts are longer than the ones in current NLI datasets. We trained eight neural NLI models in this dataset. All the models achieve more than 80% macro F1 and accuracy, which indicates that neural approaches can handle this task reasonably well. However, group accuracy, a stricter evaluation measure that is calculated with a group of positive and negative examples generated from the same statement as a unit, is in mid 80s at best, which suggests that the models\u2019 understanding of the task remains superficial. 
Further ablative analyses and explanation experiments indicate that all three text segments are used for prediction, but some decisions rely on semantically irrelevant tokens. This indicates that overfitting on these longer texts likely happens, and that additional research is required for this task to be solved.", + "author": "Alice Kwak; Jacob Israelsen; Clayton Morrison; Derek Bambauer; Mihai Surdeanu", + "authorids": "/a/alice-kwak/; /j/jacob-israelsen/; /c/clayton-morrison/; /d/derek-bambauer/; /m/mihai-surdeanu/", + "bibtex": "@inproceedings{kwak-etal-2022-validity,\n title = \"Validity Assessment of Legal Will Statements as Natural Language Inference\",\n author = \"Kwak, Alice and\n Israelsen, Jacob and\n Morrison, Clayton and\n Bambauer, Derek and\n Surdeanu, Mihai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.447/\",\n doi = \"10.18653/v1/2022.findings-emnlp.447\",\n pages = \"6047--6056\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.447.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.447/", + "pdf_size": 2600295, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13832520603238566432&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "The University of Arizona, Tucson, Arizona, USA; The University of Arizona, Tucson, Arizona, USA; The University of Arizona, Tucson, Arizona, USA; The University of Arizona, Tucson, Arizona, USA; The University of Arizona, Tucson, Arizona, USA", + "aff_domain": "arizona.edu;arizona.edu;arizona.edu;arizona.edu;arizona.edu", + "email": "arizona.edu;arizona.edu;arizona.edu;arizona.edu;arizona.edu", + "github": "", + "project": "", + 
"author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "The University of Arizona", + "aff_unique_dep": "", + "aff_unique_url": "https://www.arizona.edu", + "aff_unique_abbr": "UA", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Tucson", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.468", + "title": "VarMAE: Pre-training of Variational Masked Autoencoder for Domain-adaptive Language Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Pre-trained language models have been widely applied to standard benchmarks. Due to the flexibility of natural language, the available resources in a certain domain can be restricted to support obtaining precise representation. To address this issue, we propose a novel Transformer-based language model named VarMAE for domain-adaptive language understanding. Under the masked autoencoding objective, we design a context uncertainty learning module to encode the token\u2019s context into a smooth latent distribution. The module can produce diverse and well-formed contextual representations. 
Experiments on science- and finance-domain NLU tasks demonstrate that VarMAE can be efficiently adapted to new domains with limited resources.", + "author": "Dou Hu; Xiaolong Hou; Xiyang Du; Mengyuan Zhou; Lianxin Jiang; Yang Mo; Xiaofeng Shi", + "authorids": "/d/dou-hu/; /x/xiaolong-hou/; /x/xiyang-du/; /m/mengyuan-zhou/; /l/lianxin-jiang/; /y/yang-mo/; /x/xiaofeng-shi/", + "bibtex": "@inproceedings{hu-etal-2022-varmae,\n title = \"{V}ar{MAE}: Pre-training of Variational Masked Autoencoder for Domain-adaptive Language Understanding\",\n author = \"Hu, Dou and\n Hou, Xiaolong and\n Du, Xiyang and\n Zhou, Mengyuan and\n Jiang, Lianxin and\n Mo, Yang and\n Shi, Xiaofeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.468/\",\n doi = \"10.18653/v1/2022.findings-emnlp.468\",\n pages = \"6276--6286\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.468.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.468/", + "pdf_size": 842641, + "gs_citation": 9, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5353155830232071727&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 4, + "aff": "Institute of Information Engineering, Chinese Academy of Sciences+School of Cyber Security, University of Chinese Academy of Sciences+Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.; Ping An Life Insurance Company of China, Ltd.", + "aff_domain": 
"iie.ac.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn", + "email": "iie.ac.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn;pingan.com.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1+2;2;2;2;2;2;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Ping An Life Insurance Company of China", + "aff_unique_dep": "Institute of Information Engineering;School of Cyber Security;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.paligroup.com", + "aff_unique_abbr": "CAS;UCAS;Ping An", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.706", + "title": "Variational Autoencoder with Disentanglement Priors for Low-Resource Task-Specific Natural Language Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In this paper, we propose a variational autoencoder with disentanglement priors, VAE-Dprior, for task-specific natural language generation with none or a handful of task-specific labeled examples. In order to tackle compositional generalization across tasks, our model performs disentangled representation learning by introducing a conditional prior for the latent content space and another conditional prior for the latent label space. Both types of priors satisfy a novel property called \ud835\udf16-disentangled. We show both empirically and theoretically that the novel priors can disentangle representations even without specific regularizations as in the prior work. The content prior enables directly sampling diverse content representations from the content space learned from the seen tasks, and fuse them with the representations of novel tasks for generating semantically diverse texts in the low-resource settings. 
Our extensive experiments demonstrate the superior performance of our model over competitive baselines in terms of i) data augmentation in continuous zero/few-shot learning, and ii) text style transfer in the few-shot setting.", + "author": "Zhuang Li; Lizhen Qu; Qiongkai Xu; Tongtong Wu; Tianyang Zhan; Gholamreza Haffari", + "authorids": "/z/zhuang-li/; /l/lizhen-qu/; /q/qiongkai-xu/; /t/tongtong-wu/; /t/tianyang-zhan/; /g/gholamreza-haffari/", + "bibtex": "@inproceedings{li-etal-2022-variational-autoencoder,\n title = \"Variational Autoencoder with Disentanglement Priors for Low-Resource Task-Specific Natural Language Generation\",\n author = \"Li, Zhuang and\n Qu, Lizhen and\n Xu, Qiongkai and\n Wu, Tongtong and\n Zhan, Tianyang and\n Haffari, Gholamreza\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.706/\",\n doi = \"10.18653/v1/2022.emnlp-main.706\",\n pages = \"10335--10356\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.706.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.706/", + "pdf_size": 1032684, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11582953043325625364&as_sdt=20005&sciodt=0,9&hl=en", + "gs_version_total": 6, + "aff": "Monash University, Australia; Monash University, Australia + The University of Melbourne, Australia; The University of Melbourne, Australia; Monash University, Australia; Monash University, Australia + The University of Melbourne, Australia; Monash University, Australia", + "aff_domain": "monash.edu;monash.edu;unimelb.edu.au;monash.edu;student.monash.edu;monash.edu", + "email": 
"monash.edu;monash.edu;unimelb.edu.au;monash.edu;student.monash.edu;monash.edu", + "github": "https://github.com/zhuang-li/VAE-DPrior", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;1;0;0+1;0", + "aff_unique_norm": "Monash University;The University of Melbourne", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.monash.edu;https://www.unimelb.edu.au", + "aff_unique_abbr": "Monash;UniMelb", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0+0;0;0;0+0;0", + "aff_country_unique": "Australia" + }, + { + "id": "2022.emnlp-main.163", + "title": "Varifocal Question Generation for Fact-checking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Fact-checking requires retrieving evidence related to a claim under investigation. The task can be formulated as question generation based on a claim, followed by question answering.However, recent question generation approaches assume that the answer is known and typically contained in a passage given as input,whereas such passages are what is being sought when verifying a claim.In this paper, we present Varifocal, a method that generates questions based on different focal points within a given claim, i.e. 
different spans of the claim and its metadata, such as its source and date.Our method outperforms previous work on a fact-checking question generation dataset on a wide range of automatic evaluation metrics.These results are corroborated by our manual evaluation, which indicates that our method generates more relevant and informative questions.We further demonstrate the potential of focal points in generating sets of clarification questions for product descriptions.", + "author": "Nedjma Ousidhoum; Zhangdie Yuan; Andreas Vlachos", + "authorids": "/n/nedjma-ousidhoum/; /z/zhangdie-yuan/; /a/andreas-vlachos/", + "bibtex": "@inproceedings{ousidhoum-etal-2022-varifocal,\n title = \"Varifocal Question Generation for Fact-checking\",\n author = \"Ousidhoum, Nedjma and\n Yuan, Zhangdie and\n Vlachos, Andreas\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.163/\",\n doi = \"10.18653/v1/2022.emnlp-main.163\",\n pages = \"2532--2544\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.163.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.163/", + "pdf_size": 792704, + "gs_citation": 26, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5480940533968227392&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science and Technology, University of Cambridge; Department of Computer Science and Technology, University of Cambridge; Department of Computer Science and Technology, University of Cambridge", + "aff_domain": "cam.ac.uk;cam.ac.uk;cam.ac.uk", + "email": "cam.ac.uk;cam.ac.uk;cam.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + 
"aff_unique_norm": "University of Cambridge", + "aff_unique_dep": "Department of Computer Science and Technology", + "aff_unique_url": "https://www.cam.ac.uk", + "aff_unique_abbr": "Cambridge", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Cambridge", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.455", + "title": "Vector-Quantized Input-Contextualized Soft Prompts for Natural Language Understanding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt Tuning has been largely successful as a parameter-efficient method of conditioning large-scale pre-trained language models to perform downstream tasks. Thus far, soft prompt tuning learns a fixed set of task-specific continuous vectors, i.e., soft tokens that remain static across the task samples. A fixed prompt, however, may not generalize well to the diverse kinds of inputs the task comprises. In order to address this, we propose Vector-quantized Input-contextualized Prompts (VIP) as an extension to the soft prompt tuning framework. VIP particularly focuses on two aspects\u2014contextual prompts that learns input-specific contextualization of the soft prompt tokens through a small-scale sentence encoder and quantized prompts that maps the contextualized prompts to a set of learnable codebook vectors through a Vector quantization network. On various language understanding tasks like SuperGLUE, QA, Relation classification, NER and NLI, VIP outperforms the soft prompt tuning (PT) baseline by an average margin of 1.19%. Further, our generalization studies show that VIP learns more robust prompt representations, surpassing PT by a margin of 0.6% - 5.3% on Out-of-domain QA and NLI tasks respectively, and by 0.75% on Multi-Task setup over 4 tasks spanning across 12 domains.", + "author": "Rishabh Bhardwaj; Amrita Saha; Steven C.H. 
Hoi; Soujanya Poria", + "authorids": "/r/rishabh-bhardwaj/; /a/amrita-saha/; /s/steven-c-h-hoi/; /s/soujanya-poria/", + "bibtex": "@inproceedings{bhardwaj-etal-2022-vector,\n title = \"Vector-Quantized Input-Contextualized Soft Prompts for Natural Language Understanding\",\n author = \"Bhardwaj, Rishabh and\n Saha, Amrita and\n Hoi, Steven C.H. and\n Poria, Soujanya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.455/\",\n doi = \"10.18653/v1/2022.emnlp-main.455\",\n pages = \"6776--6791\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.455.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.455/", + "pdf_size": 1392619, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3007128011327477258&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Salesforce Research; Salesforce Research; Salesforce Research; Singapore University of Technology and Design", + "aff_domain": "salesforce.com; ; ; ", + "email": "salesforce.com; ; ; ", + "github": "https://github.com/declare-lab/VIP", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;1", + "aff_unique_norm": "Salesforce;Singapore University of Technology and Design", + "aff_unique_dep": "Salesforce Research;", + "aff_unique_url": "https://research.salesforce.com;https://www.sutd.edu.sg", + "aff_unique_abbr": "Salesforce;SUTD", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;1", + "aff_country_unique": "United States;Singapore" + }, + { + "id": "2022.emnlp-main.432", + "title": "Video Question Answering: Datasets, Algorithms and Challenges", + "track": "main", + "status": "Main", + 
"award": false, + "abstract": "This survey aims to sort out the recent advances in video question answering (VideoQA) and point towards future directions. We firstly categorize the datasets into 1) normal VideoQA, multi-modal VideoQA and knowledge-based VideoQA, according to the modalities invoked in the question-answer pairs, or 2) factoid VideoQA and inference VideoQA, according to the technical challenges in comprehending the questions and deriving the correct answers. We then summarize the VideoQA techniques, including those mainly designed for Factoid QA (e.g., the early spatio-temporal attention-based methods and the recently Transformer-based ones) and those targeted at explicit relation and logic inference (e.g., neural modular networks, neural symbolic methods, and graph-structured methods). Aside from the backbone techniques, we delve into the specific models and find out some common and useful insights either for video modeling, question answering, or for cross-modal correspondence learning. Finally, we point out the research trend of studying beyond factoid VideoQA to inference VideoQA, as well as towards the robustness and interpretability. Additionally, we maintain a repository, https://github.com/VRU-NExT/VideoQA, to keep trace of the latest VideoQA papers, datasets, and their open-source implementations if available. 
With these efforts, we strongly hope this survey could shed light on the follow-up VideoQA research.", + "author": "Yaoyao Zhong; Wei Ji; Junbin Xiao; Yicong Li; Weihong Deng; Tat-Seng Chua", + "authorids": "/y/yaoyao-zhong/; /w/wei-ji/; /j/junbin-xiao/; /y/yicong-li/; /w/weihong-deng/; /t/tat-seng-chua/", + "bibtex": "@inproceedings{zhong-etal-2022-video,\n title = \"Video Question Answering: Datasets, Algorithms and Challenges\",\n author = \"Zhong, Yaoyao and\n Ji, Wei and\n Xiao, Junbin and\n Li, Yicong and\n Deng, Weihong and\n Chua, Tat-Seng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.432/\",\n doi = \"10.18653/v1/2022.emnlp-main.432\",\n pages = \"6439--6455\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.432.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.432/", + "pdf_size": 1464410, + "gs_citation": 107, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=525004147993762442&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 4, + "aff": "National University of Singapore; National University of Singapore + Sea-NExT Joint Lab; National University of Singapore + Sea-NExT Joint Lab; National University of Singapore; Beijing University of Posts and Telecommunications; National University of Singapore + Sea-NExT Joint Lab", + "aff_domain": "bupt.edu.cn;comp.nus.edu.sg;nus.edu.sg;u.nus.edu;bupt.edu.cn;nus.edu.sg", + "email": "bupt.edu.cn;comp.nus.edu.sg;nus.edu.sg;u.nus.edu;bupt.edu.cn;nus.edu.sg", + "github": "https://github.com/VRU-NExT/VideoQA", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0+1;0+1;0;2;0+1", + "aff_unique_norm": "National University of Singapore;Sea-NExT Joint 
Lab;Beijing University of Posts and Telecommunications", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.nus.edu.sg;;http://www.bupt.edu.cn/", + "aff_unique_abbr": "NUS;;BUPT", + "aff_campus_unique_index": ";;1;", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;2;0", + "aff_country_unique": "Singapore;;China" + }, + { + "id": "2022.emnlp-main.675", + "title": "VisToT: Vision-Augmented Table-to-Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Table-to-text generation has been widely studied in the Natural Language Processing community in the recent years. We give a new perspective to this problem by incorporating signals from both tables as well as associated images to generate relevant text. While tables contain a structured list of facts, images are a rich source of unstructured visual information. For example, in the tourism domain, images can be used to infer knowledge such as the type of landmark (e.g., church), its architecture (e.g., Ancient Roman), and composition (e.g., white marble). Therefore, in this paper, we introduce the novel task of Vision-augmented Table-To-Text Generation (VisToT, defined as follows: given a table and an associated image, produce a descriptive sentence conditioned on the multimodal input. For the task, we present a novel multimodal table-to-text dataset, WikiLandmarks, covering 73,084 unique world landmarks. Further, we also present a competitive architecture, namely, VT3 that generates accurate sentences conditioned on the image and table pairs. Through extensive analyses and experiments, we show that visual cues from images are helpful in (i) inferring missing information from incomplete or sparse tables, and (ii) strengthening the importance of useful information from noisy tables for natural language generation. 
We make the code and data publicly available.", + "author": "Prajwal Gatti; Anand Mishra; Manish Gupta; Mithun Das Gupta", + "authorids": "/p/prajwal-gatti/; /a/anand-mishra/; /m/manish-gupta/; /m/mithun-das-gupta/", + "bibtex": "@inproceedings{gatti-etal-2022-vistot,\n title = \"{V}is{T}o{T}: Vision-Augmented Table-to-Text Generation\",\n author = \"Gatti, Prajwal and\n Mishra, Anand and\n Gupta, Manish and\n Das Gupta, Mithun\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.675/\",\n doi = \"10.18653/v1/2022.emnlp-main.675\",\n pages = \"9936--9949\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.675.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.675/", + "pdf_size": 5929681, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9026766036542799930&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "Indian Institute of Technology Jodhpur; Indian Institute of Technology Jodhpur; Microsoft; Microsoft", + "aff_domain": "iitj.ac.in;iitj.ac.in;microsoft.com;microsoft.com", + "email": "iitj.ac.in;iitj.ac.in;microsoft.com;microsoft.com", + "github": "", + "project": "https://vl2g.github.io/projects/vistot", + "author_num": 4, + "aff_unique_index": "0;0;1;1", + "aff_unique_norm": "Indian Institute of Technology Jodhpur;Microsoft Corporation", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitj.ac.in;https://www.microsoft.com", + "aff_unique_abbr": "IIT Jodhpur;Microsoft", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Jodhpur;", + "aff_country_unique_index": "0;0;1;1", + "aff_country_unique": "India;United States" + }, + { + "id": "2022.findings-emnlp.178", + 
"title": "Visual Named Entity Linking: A New Dataset and A Baseline", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Visual Entity Linking (VEL) is a task to link regions of images with their corresponding entities in Knowledge Bases (KBs), which is beneficial for many computer vision tasks such as image retrieval, image caption, and visual question answering. While existing tasks in VEL either rely on textual data to complement a multi-modal linking or only link objects with general entities, which fails to perform named entity linking on large amounts of image data. In this paper, we consider a purely Visual-based Named Entity Linking (VNEL) task, where the input only consists of an image. The task is to identify objects of interest (i.e., visual entity mentions) in images and link them to corresponding named entities in KBs. Since each entity often contains rich visual and textual information in KBs, we thus propose three different sub-tasks, i.e., visual to visual entity linking (V2VEL), visual to textual entity linking (V2TEL), and visual to visual-textual entity linking (V2VTEL). In addition, we present a high-quality human-annotated visual person linking dataset, named WIKIPerson. Based on WIKIPerson, we establish a series of baseline algorithms for the solution of each sub-task, and conduct experiments to verify the quality of the proposed datasets and the effectiveness of baseline methods. We envision this work to be helpful for soliciting more works regarding VNEL in the future. 
The codes and datasets are publicly available at https://github.com/ict-bigdatalab/VNEL.", + "author": "Wen Sun; Yixing Fan; Jiafeng Guo; Ruqing Zhang; Xueqi Cheng", + "authorids": "/w/wen-sun/; /y/yixing-fan/; /j/jiafeng-guo/; /r/ruqing-zhang/; /x/xueqi-cheng/", + "bibtex": "@inproceedings{sun-etal-2022-visual,\n title = \"Visual Named Entity Linking: A New Dataset and A Baseline\",\n author = \"Sun, Wen and\n Fan, Yixing and\n Guo, Jiafeng and\n Zhang, Ruqing and\n Cheng, Xueqi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.178/\",\n doi = \"10.18653/v1/2022.findings-emnlp.178\",\n pages = \"2403--2415\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.178.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.178/", + "pdf_size": 9351231, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7839504934004240951&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; University of Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China; CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China", + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": 
"https://github.com/ict-bigdatalab/VNEL", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;0;0;0", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.ac.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.93", + "title": "Visual Spatial Description: Controlled Spatial-Oriented Image-to-Text Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Image-to-text tasks such as open-ended image captioning and controllable image description have received extensive attention for decades. Here we advance this line of work further, presenting Visual Spatial Description (VSD), a new perspective for image-to-text toward spatial semantics. Given an image and two objects inside it, VSD aims to produce one description focusing on the spatial perspective between the two objects. Accordingly, we annotate a dataset manually to facilitate the investigation of the newly-introduced task, and then build several benchmark encoder-decoder models by using VL-BART and VL-T5 as backbones. In addition, we investigate visual spatial relationship classification (VSRC) information into our model by pipeline and end-to-end architectures. Finally, we conduct experiments on our benchmark dataset to evaluate all our models. Results show that our models are awe-inspiring, offering accurate and human-like spatial-oriented text descriptions. Besides, VSRC has great potential for VSD, and the joint end-to-end architecture is the better choice for their integration. 
We will make the dataset and codes publicly available for research purposes.", + "author": "Yu Zhao; Jianguo Wei; ZhiChao Lin; Yueheng Sun; Meishan Zhang; Min Zhang", + "authorids": "/y/yu-zhao/; /j/jianguo-wei/; /z/zhichao-lin/; /y/yueheng-sun/; /m/meishan-zhang/; /m/min-zhang/", + "bibtex": "@inproceedings{zhao-etal-2022-visual,\n title = \"Visual Spatial Description: Controlled Spatial-Oriented Image-to-Text Generation\",\n author = \"Zhao, Yu and\n Wei, Jianguo and\n Lin, ZhiChao and\n Sun, Yueheng and\n Zhang, Meishan and\n Zhang, Min\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.93/\",\n doi = \"10.18653/v1/2022.emnlp-main.93\",\n pages = \"1437--1449\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.93.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.93/", + "pdf_size": 1488036, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6103941806004339142&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "College of Intelligence and Computing, Tianjin University; College of Intelligence and Computing, Tianjin University; School of New Media and Communication, Tianjin University; College of Intelligence and Computing, Tianjin University; Institute of Computing and Intelligence, Harbin Institute of Technology (Shenzhen); Institute of Computing and Intelligence, Harbin Institute of Technology (Shenzhen)", + "aff_domain": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;hit.edu.cn;hit.edu.cn", + "email": "tju.edu.cn;tju.edu.cn;tju.edu.cn;tju.edu.cn;hit.edu.cn;hit.edu.cn", + "github": "https://github.com/zhaoyucs/VSD", + "project": "", + "author_num": 6, + "aff_unique_index": 
"0;0;0;0;1;1", + "aff_unique_norm": "Tianjin University;Harbin Institute of Technology", + "aff_unique_dep": "College of Intelligence and Computing;Institute of Computing and Intelligence", + "aff_unique_url": "http://www.tju.edu.cn;http://www.hit.edu.cn/", + "aff_unique_abbr": ";HIT", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Shenzhen", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.45", + "title": "Visualizing the Obvious: A Concreteness-based Ensemble Model for Noun Property Prediction", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Neural language models encode rich knowledge about entities and their relationships which can be extracted from their representations using probing. Common properties of nouns (e.g., red strawberries, small ant) are, however, more challenging to extract compared to other types of knowledge because they are rarely explicitly stated in texts.We hypothesize this to mainly be the case for perceptual properties which are obvious to the participants in the communication. We propose to extract these properties from images and use them in an ensemble model, in order to complement the information that is extracted from language models. We consider perceptual properties to be more concrete than abstract properties (e.g., interesting, flawless). We propose to use the adjectives\u2019 concreteness score as a lever to calibrate the contribution of each source (text vs. images). We evaluate our ensemble model in a ranking task where the actual properties of a noun need to be ranked higher than other non-relevant properties. 
Our results show that the proposed combination of text and images greatly improves noun property prediction compared to powerful text-based language models.", + "author": "Yue Yang; Artemis Panagopoulou; Marianna Apidianaki; Mark Yatskar; Chris Callison-Burch", + "authorids": "/y/yue-yang/; /a/artemis-panagopoulou/; /m/marianna-apidianaki/; /m/mark-yatskar/; /c/chris-callison-burch/", + "bibtex": "@inproceedings{yang-etal-2022-visualizing,\n title = \"Visualizing the Obvious: A Concreteness-based Ensemble Model for Noun Property Prediction\",\n author = \"Yang, Yue and\n Panagopoulou, Artemis and\n Apidianaki, Marianna and\n Yatskar, Mark and\n Callison-Burch, Chris\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.45/\",\n doi = \"10.18653/v1/2022.findings-emnlp.45\",\n pages = \"638--655\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.45.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.45/", + "pdf_size": 5219052, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2135985147144185688&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 9, + "aff": "Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania; Department of Computer and Information Science, University of Pennsylvania", + "aff_domain": "seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "email": 
"seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu;seas.upenn.edu", + "github": "https://github.com/artemisp/semantic-norms", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "University of Pennsylvania", + "aff_unique_dep": "Department of Computer and Information Science", + "aff_unique_url": "https://www.upenn.edu", + "aff_unique_abbr": "UPenn", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.322", + "title": "Viterbi Decoding of Directed Acyclic Transformer for Non-Autoregressive Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Non-autoregressive models achieve significant decoding speedup in neural machine translation but lack the ability to capture sequential dependency. Directed Acyclic Transformer (DA-Transformer) was recently proposed to model sequential dependency with a directed acyclic graph. Consequently, it has to apply a sequential decision process at inference time, which harms the global translation accuracy. In this paper, we present a Viterbi decoding framework for DA-Transformer, which guarantees to find the joint optimal solution for the translation and decoding path under any length constraint. 
Experimental results demonstrate that our approach consistently improves the performance of DA-Transformer while maintaining a similar decoding speedup.", + "author": "Chenze Shao; Zhengrui Ma; Yang Feng", + "authorids": "/c/chenze-shao/; /z/zhengrui-ma/; /y/yang-feng/", + "bibtex": "@inproceedings{shao-etal-2022-viterbi,\n title = \"{V}iterbi Decoding of Directed Acyclic Transformer for Non-Autoregressive Machine Translation\",\n author = \"Shao, Chenze and\n Ma, Zhengrui and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.322/\",\n doi = \"10.18653/v1/2022.findings-emnlp.322\",\n pages = \"4390--4397\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.322.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.322/", + "pdf_size": 284903, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12413636123351148279&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences + University of Chinese Academy of Sciences", + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "https://github.com/thu-coai/DA-Transformer", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + 
"aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.508", + "title": "WANLI: Worker and AI Collaboration for Natural Language Inference Dataset Creation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A recurring challenge of crowdsourcing NLP datasets at scale is that human writers often rely on repetitive patterns when crafting examples, leading to a lack of linguistic diversity. We introduce a novel approach for dataset creation based on worker and AI collaboration, which brings together the generative strength of language models and the evaluative strength of humans. Starting with an existing dataset, MultiNLI for natural language inference (NLI), our approach uses dataset cartography to automatically identify examples that demonstrate challenging reasoning patterns, and instructs GPT-3 to compose new examples with similar patterns. Machine generated examples are then automatically filtered, and finally revised and labeled by human crowdworkers. The resulting dataset, WANLI, consists of 107,885 NLI examples and presents unique empirical strengths over existing NLI datasets. Remarkably, training a model on WANLI improves performance on eight out-of-domain test sets we consider, including by 11% on HANS and 9% on Adversarial NLI, compared to training on the 4x larger MultiNLI. Moreover, it continues to be more effective than MultiNLI augmented with other NLI datasets. 
Our results demonstrate the promise of leveraging natural language generation techniques and re-imagining the role of humans in the dataset creation process.", + "author": "Alisa Liu; Swabha Swayamdipta; Noah A. Smith; Yejin Choi", + "authorids": "/a/alisa-liu/; /s/swabha-swayamdipta/; /n/noah-a-smith/; /y/yejin-choi/", + "bibtex": "@inproceedings{liu-etal-2022-wanli,\n title = \"{WANLI}: Worker and {AI} Collaboration for Natural Language Inference Dataset Creation\",\n author = \"Liu, Alisa and\n Swayamdipta, Swabha and\n Smith, Noah A. and\n Choi, Yejin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.508/\",\n doi = \"10.18653/v1/2022.findings-emnlp.508\",\n pages = \"6826--6847\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.508.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.508/", + "pdf_size": 2195710, + "gs_citation": 230, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14224799134138920015&as_sdt=5,48&sciodt=0,48&hl=en", + "gs_version_total": 5, + "aff": "Paul G. Allen School of Computer Science & Engineering, University of Washington + Allen Institute for Artificial Intelligence; Allen Institute for Artificial Intelligence + University of Southern California; Paul G. Allen School of Computer Science & Engineering, University of Washington + Allen Institute for Artificial Intelligence; Paul G. 
Allen School of Computer Science & Engineering, University of Washington + Allen Institute for Artificial Intelligence", + "aff_domain": "cs.washington.edu; ; ; ", + "email": "cs.washington.edu; ; ; ", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;1+2;0+1;0+1", + "aff_unique_norm": "University of Washington;Allen Institute for Artificial Intelligence;University of Southern California", + "aff_unique_dep": "Paul G. Allen School of Computer Science & Engineering;;", + "aff_unique_url": "https://www.washington.edu;https://allenai.org;https://www.usc.edu", + "aff_unique_abbr": "UW;AI2;USC", + "aff_campus_unique_index": "0;2;0;0", + "aff_campus_unique": "Seattle;;Los Angeles", + "aff_country_unique_index": "0+0;0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.491", + "title": "WR-One2Set: Towards Well-Calibrated Keyphrase Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Keyphrase generation aims to automatically generate short phrases summarizing an input document. The recently emerged ONE2SET paradigm (Ye et al., 2021) generates keyphrases as a set and has achieved competitive performance. Nevertheless, we observe serious calibration errors outputted by ONE2SET, especially in the over-estimation of \u2205 token (means \u201cno corresponding keyphrase\u201d). In this paper, we deeply analyze this limitation and identify two main reasons behind: 1) the parallel generation has to introduce excessive \u2205 as padding tokens into training instances; and 2) the training mechanism assigning target to each slot is unstable and further aggravates the \u2205 token over-estimation. To make the model well-calibrated, we propose WR-ONE2SET which extends ONE2SET with an adaptive instance-level cost Weighting strategy and a target Re-assignment mechanism. 
The former dynamically penalizes the over-estimated slots for different instances thus smoothing the uneven training distribution. The latter refines the original inappropriate assignment and reduces the supervisory signals of over-estimated slots. Experimental results on commonly-used datasets demonstrate the effectiveness and generality of our proposed paradigm.", + "author": "Binbin Xie; Xiangpeng Wei; Baosong Yang; Huan Lin; Jun Xie; Xiaoli Wang; Min Zhang; Jinsong Su", + "authorids": "/b/binbin-xie/; /x/xiangpeng-wei/; /b/baosong-yang/; /h/huan-lin/; /j/jun-xie/; /x/xiaoli-wang/; /m/min-zhang/; /j/jinsong-su/", + "bibtex": "@inproceedings{xie-etal-2022-wr,\n title = \"{WR}-{O}ne2{S}et: Towards Well-Calibrated Keyphrase Generation\",\n author = \"Xie, Binbin and\n Wei, Xiangpeng and\n Yang, Baosong and\n Lin, Huan and\n Xie, Jun and\n Wang, Xiaoli and\n Zhang, Min and\n Su, Jinsong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.491/\",\n doi = \"10.18653/v1/2022.emnlp-main.491\",\n pages = \"7283--7293\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.491.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.491/", + "pdf_size": 1249117, + "gs_citation": 20, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2136813182526445592&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "School of Informatics, Xiamen University, China+Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan, Ministry of Culture and Tourism, China; Alibaba Group, China; Alibaba Group, China; Alibaba Group, China; Alibaba Group, China; Key Laboratory of Digital 
Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan, Ministry of Culture and Tourism, China; Soochow University, China; School of Informatics, Xiamen University, China+Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan, Ministry of Culture and Tourism, China", + "aff_domain": "stu.xmu.edu.cn;gmail.com;alibaba-inc.com; ; ; ; ;xmu.edu.cn", + "email": "stu.xmu.edu.cn;gmail.com;alibaba-inc.com; ; ; ; ;xmu.edu.cn", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0+1;2;2;2;2;1;3;0+1", + "aff_unique_norm": "Xiamen University;Ministry of Culture and Tourism;Alibaba Group;Soochow University", + "aff_unique_dep": "School of Informatics;Key Laboratory of Digital Protection and Intelligent Processing of Intangible Cultural Heritage of Fujian and Taiwan;;", + "aff_unique_url": "https://www.xmu.edu.cn;;https://www.alibaba.com;https://www.soochow.edu.cn", + "aff_unique_abbr": "XMU;;Alibaba;Soochow U", + "aff_campus_unique_index": ";", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0;0;0;0;0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.84", + "title": "WSpeller: Robust Word Segmentation for Enhancing Chinese Spelling Check", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Chinese spelling check (CSC) detects and corrects spelling errors in Chinese texts. Previous approaches have combined character-level phonetic and graphic information, ignoring the importance of segment-level information. According to our pilot study, spelling errors are always associated with incorrect word segmentation. When appropriate word boundaries are provided, CSC performance is greatly enhanced. Based on these findings, we present WSpeller, a CSC model that takes into account word segmentation. 
A fundamental component of WSpeller is a W-MLM, which is trained by predicting visually and phonetically similar words. Through modification of the embedding layer\u2019s input, word segmentation information can be incorporated. Additionally, a robust module is trained to assist the W-MLM-based correction module by predicting the correct word segmentations from sentences containing spelling errors. We evaluate WSpeller on the widely used benchmark datasets SIGHAN13, SIGHAN14, and SIGHAN15. Our model is superior to state-of-the-art baselines on SIGHAN13 and SIGHAN15 and maintains equal performance on SIGHAN14.", + "author": "Fangfang Li; Youran Shan; Junwen Duan; Xingliang Mao; Minlie Huang", + "authorids": "/f/fangfang-li/; /y/youran-shan/; /j/junwen-duan/; /x/xingliang-mao/; /m/minlie-huang/", + "bibtex": "@inproceedings{li-etal-2022-wspeller,\n title = \"{WS}peller: Robust Word Segmentation for Enhancing {C}hinese Spelling Check\",\n author = \"Li, Fangfang and\n Shan, Youran and\n Duan, Junwen and\n Mao, Xingliang and\n Huang, Minlie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.84/\",\n doi = \"10.18653/v1/2022.findings-emnlp.84\",\n pages = \"1179--1188\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.84.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.84/", + "pdf_size": 506535, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8025056017373605819&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 2, + "aff": "School of Computer Science and Engineering, Central South University; School of Computer Science and Engineering, Central South University; School of Computer Science and 
Engineering, Central South University; Institute of Big Data And Internet Innovation, Hunan University of Technology and Business; Beijing National Research Center for Information Science and Technology, Tsinghua University", + "aff_domain": "csu.edu.cn;csu.edu.cn;csu.edu.cn;163.com;tsinghua.edu.cn", + "email": "csu.edu.cn;csu.edu.cn;csu.edu.cn;163.com;tsinghua.edu.cn", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;1;2", + "aff_unique_norm": "Central South University;Hunan University of Technology and Business;Tsinghua University", + "aff_unique_dep": "School of Computer Science and Engineering;Institute of Big Data And Internet Innovation;Beijing National Research Center for Information Science and Technology", + "aff_unique_url": "http://www.csu.edu.cn;;https://www.tsinghua.edu.cn", + "aff_unique_abbr": ";;THU", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.166", + "title": "Wait-info Policy: Balancing Source and Target at Information Level for Simultaneous Machine Translation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Simultaneous machine translation (SiMT) outputs the translation while receiving the source inputs, and hence needs to balance the received source information and translated target information to make a reasonable decision between waiting for inputs or outputting translation. Previous methods always balance source and target information at the token level, either directly waiting for a fixed number of tokens or adjusting the waiting based on the current token. In this paper, we propose a Wait-info Policy to balance source and target at the information level. We first quantify the amount of information contained in each token, named info. 
Then during simultaneous translation, the decision of waiting or outputting is made based on the comparison results between the total info of previous target outputs and received source inputs. Experiments show that our method outperforms strong baselines under and achieves better balance via the proposed info.", + "author": "Shaolei Zhang; Shoutao Guo; Yang Feng", + "authorids": "/s/shaolei-zhang/; /s/shoutao-guo/; /y/yang-feng/", + "bibtex": "@inproceedings{zhang-etal-2022-wait,\n title = \"Wait-info Policy: Balancing Source and Target at Information Level for Simultaneous Machine Translation\",\n author = \"Zhang, Shaolei and\n Guo, Shoutao and\n Feng, Yang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.166/\",\n doi = \"10.18653/v1/2022.findings-emnlp.166\",\n pages = \"2249--2263\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.166.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.166/", + "pdf_size": 917007, + "gs_citation": 18, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4897755565454786871&as_sdt=400005&sciodt=0,14&hl=en", + "gs_version_total": 5, + "aff": "Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, Beijing, China; Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (ICT/CAS) + University of Chinese Academy of Sciences, 
Beijing, China", + "aff_domain": "ict.ac.cn;ict.ac.cn;ict.ac.cn", + "email": "ict.ac.cn;ict.ac.cn;ict.ac.cn", + "github": "https://github.com/ictnlp/Wait-info", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences", + "aff_unique_dep": "Institute of Computing Technology;", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn", + "aff_unique_abbr": "CAS;UCAS", + "aff_campus_unique_index": "1;1;1", + "aff_campus_unique": ";Beijing", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.98", + "title": "Watch the Neighbors: A Unified K-Nearest Neighbor Contrastive Learning Framework for OOD Intent Discovery", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Discovering out-of-domain (OOD) intent is important for developing new skills in task-oriented dialogue systems. The key challenges lie in how to transfer prior in-domain (IND) knowledge to OOD clustering, as well as jointly learn OOD representations and cluster assignments. Previous methods suffer from in-domain overfitting problem, and there is a natural gap between representation learning and clustering objectives. In this paper, we propose a unified K-nearest neighbor contrastive learning framework to discover OOD intents. Specifically, for IND pre-training stage, we propose a KCL objective to learn inter-class discriminative features, while maintaining intra-class diversity, which alleviates the in-domain overfitting problem. For OOD clustering stage, we propose a KCC method to form compact clusters by mining true hard negative samples, which bridges the gap between clustering and representation learning. 
Extensive experiments on three benchmark datasets show that our method achieves substantial improvements over the state-of-the-art methods.", + "author": "Yutao Mou; Keqing He; Pei Wang; Yanan Wu; Jingang Wang; Wei Wu; Weiran Xu", + "authorids": "/y/yutao-mou/; /k/keqing-he/; /p/pei-wang/; /y/yanan-wu/; /j/jingang-wang/; /w/wei-wu/; /w/weiran-xu/", + "bibtex": "@inproceedings{mou-etal-2022-watch,\n title = \"Watch the Neighbors: A Unified K-Nearest Neighbor Contrastive Learning Framework for {OOD} Intent Discovery\",\n author = \"Mou, Yutao and\n He, Keqing and\n Wang, Pei and\n Wu, Yanan and\n Wang, Jingang and\n Wu, Wei and\n Xu, Weiran\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.98/\",\n doi = \"10.18653/v1/2022.emnlp-main.98\",\n pages = \"1517--1529\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.98.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.98/", + "pdf_size": 3311558, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3345195257642103612&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Beijing University of Posts and Telecommunications; Meituan; Beijing University of Posts and Telecommunications; Beijing University of Posts and Telecommunications; Meituan; Meituan; Beijing University of Posts and Telecommunications", + "aff_domain": "bupt.edu.cn;meituan.com;bupt.edu.cn;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn", + "email": "bupt.edu.cn;meituan.com;bupt.edu.cn;bupt.edu.cn;meituan.com;meituan.com;bupt.edu.cn", + "github": "https://github.com/myt517/KCOD", + "project": "", + "author_num": 7, + "aff_unique_index": "0;1;0;0;1;1;0", + "aff_unique_norm": 
"Beijing University of Posts and Telecommunications;Meituan", + "aff_unique_dep": ";", + "aff_unique_url": "http://www.bupt.edu.cn/;https://www.meituan.com", + "aff_unique_abbr": "BUPT;Meituan", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Beijing;", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.798", + "title": "WeDef: Weakly Supervised Backdoor Defense for Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Existing backdoor defense methods are only effective for limited trigger types. To defend different trigger types at once, we start from the class-irrelevant nature of the poisoning process and propose a novel weakly supervised backdoor defense framework WeDef. Recent advances in weak supervision make it possible to train a reasonably accurate text classifier using only a small number of user-provided, class-indicative seed words. Such seed words shall be considered independent of the triggers. Therefore, a weakly supervised text classifier trained by only the poisoned documents without their labels will likely have no backdoor. Inspired by this observation, in WeDef, we define the reliability of samples based on whether the predictions of the weak classifier agree with their labels in the poisoned training set. We further improve the results through a two-phase sanitization: (1) iteratively refine the weak classifier based on the reliable samples and (2) train a binary poison classifier by distinguishing the most unreliable samples from the most reliable samples. Finally, we train the sanitized model on the samples that the poison classifier predicts as benign. 
Extensive experiments show that WeDef is effective against popular trigger-based attacks (e.g., words, sentences, and paraphrases), outperforming existing defense methods.", + "author": "Lesheng Jin; Zihan Wang; Jingbo Shang", + "authorids": "/l/lesheng-jin/; /z/zihan-wang/; /j/jingbo-shang/", + "bibtex": "@inproceedings{jin-etal-2022-wedef,\n title = \"{W}e{D}ef: Weakly Supervised Backdoor Defense for Text Classification\",\n author = \"Jin, Lesheng and\n Wang, Zihan and\n Shang, Jingbo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.798/\",\n doi = \"10.18653/v1/2022.emnlp-main.798\",\n pages = \"11614--11626\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.798.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.798/", + "pdf_size": 443027, + "gs_citation": 15, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17354619397857976415&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": "University of California, San Diego; University of California, San Diego; University of California, San Diego", + "aff_domain": "ucsd.edu;ucsd.edu;ucsd.edu", + "email": "ucsd.edu;ucsd.edu;ucsd.edu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of California, San Diego", + "aff_unique_dep": "", + "aff_unique_url": "https://www.ucsd.edu", + "aff_unique_abbr": "UCSD", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "San Diego", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.353", + "title": "WeTS: A Benchmark for Translation Suggestion", + "track": "main", + "status": "Main", 
+ "award": false, + "abstract": "Translation suggestion (TS), which provides alternatives for specific words or phrases given the entire documents generated by machine translation (MT), has been proven to play a significant role in post-editing (PE). There are two main pitfalls for existing researches in this line. First, most conventional works only focus on the overall performance of PE but ignore the exact performance of TS, which makes the progress of PE sluggish and less explainable; Second, as no publicly available golden dataset exists to support in-depth research for TS, almost all of the previous works conduct experiments on their in-house datasets or the noisy datasets built automatically, which makes their experiments hard to be reproduced and compared. To break these limitations mentioned above and spur the research in TS, we create a benchmark dataset, called WeTS, which is a golden corpus annotated by expert translators on four translation directions. Apart from the golden corpus, we also propose several methods to generate synthetic corpora which can be used to improve the performance substantially through pre-training. As for the model, we propose the segment-aware self-attention based Transformer for TS. 
Experimental results show that our approach achieves the best results on all four directions, including English-to-German, German-to-English, Chinese-to-English, and English-to-Chinese.", + "author": "Zhen Yang; Fandong Meng; Yingxue Zhang; Ernan Li; Jie Zhou", + "authorids": "/z/zhen-yang/; /f/fandong-meng/; /y/yingxue-zhang/; /e/ernan-li/; /j/jie-zhou/", + "bibtex": "@inproceedings{yang-etal-2022-wets,\n title = \"{W}e{TS}: A Benchmark for Translation Suggestion\",\n author = \"Yang, Zhen and\n Meng, Fandong and\n Zhang, Yingxue and\n Li, Ernan and\n Zhou, Jie\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.353/\",\n doi = \"10.18653/v1/2022.emnlp-main.353\",\n pages = \"5278--5290\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.353.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.353/", + "pdf_size": 743060, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13137070709194673479&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Pattern Recognition Center, WeChat AI, Tencent Inc, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, Beijing, China; Pattern Recognition Center, WeChat AI, Tencent Inc, Beijing, China", + "aff_domain": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "email": "tencent.com;tencent.com;tencent.com;tencent.com;tencent.com", + "github": "https://github.com/ZhenYangIACAS/WeTS.git", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Tencent 
Inc", + "aff_unique_dep": "Pattern Recognition Center", + "aff_unique_url": "https://www.tencent.com", + "aff_unique_abbr": "Tencent", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Beijing", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.487", + "title": "Weakly Supervised Headline Dependency Parsing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "English news headlines form a register with unique syntactic properties that have been documented in linguistics literature since the 1930s. However, headlines have received surprisingly little attention from the NLP syntactic parsing community. We aim to bridge this gap by providing the first news headline corpus of Universal Dependencies annotated syntactic dependency trees, which enables us to evaluate existing state-of-the-art dependency parsers on news headlines. To improve English news headline parsing accuracies, we develop a projection method to bootstrap silver training data from unlabeled news headline-article lead sentence pairs. Models trained on silver headline parses demonstrate significant improvements in performance over models trained solely on gold-annotated long-form texts. 
Ultimately, we find that, although projected silver training data improves parser performance across different news outlets, the improvement is moderated by constructions idiosyncratic to outlet.", + "author": "Adrian Benton; Tianze Shi; Ozan \u0130rsoy; Igor Malioutov", + "authorids": "/a/adrian-benton/; /t/tianze-shi/; /o/ozan-irsoy/; /i/igor-malioutov/", + "bibtex": "@inproceedings{benton-etal-2022-weakly,\n title = \"Weakly Supervised Headline Dependency Parsing\",\n author = \"Benton, Adrian and\n Shi, Tianze and\n {\\.I}rsoy, Ozan and\n Malioutov, Igor\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.487/\",\n doi = \"10.18653/v1/2022.findings-emnlp.487\",\n pages = \"6520--6535\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.487.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.487/", + "pdf_size": 397756, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8079425282319574492&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Bloomberg + Google Research; Cornell University + Google Research; Bloomberg; Bloomberg", + "aff_domain": "google.com;google.com;bloomberg.net;bloomberg.net", + "email": "google.com;google.com;bloomberg.net;bloomberg.net", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0+1;2+1;0;0", + "aff_unique_norm": "Bloomberg;Google;Cornell University", + "aff_unique_dep": ";Google Research;", + "aff_unique_url": "https://www.bloomberg.com;https://research.google;https://www.cornell.edu", + "aff_unique_abbr": "Bloomberg;Google Research;Cornell", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Mountain View", + 
"aff_country_unique_index": "0+0;0+0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.639", + "title": "Weakly-Supervised Temporal Article Grounding", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Given a long untrimmed video and natural language queries, video grounding (VG) aims to temporally localize the semantically-aligned video segments. Almost all existing VG work holds two simple but unrealistic assumptions: 1) All query sentences can be grounded in the corresponding video. 2) All query sentences for the same video are always at the same semantic scale. Unfortunately, both assumptions make today\u2019s VG models fail to work in practice. For example, in real-world multimodal assets (eg, news articles), most of the sentences in the article can not be grounded in their affiliated videos, and they typically have rich hierarchical relations (ie, at different semantic scales). To this end, we propose a new challenging grounding task: Weakly-Supervised temporal Article Grounding (WSAG). Specifically, given an article and a relevant video, WSAG aims to localize all \u201cgroundable\u201d sentences to the video, and these sentences are possibly at different semantic scales. Accordingly, we collect the first WSAG dataset to facilitate this task: YouwikiHow, which borrows the inherent multi-scale descriptions in wikiHow articles and plentiful YouTube videos. In addition, we propose a simple but effective method DualMIL for WSAG, which consists of a two-level MIL loss and a single-/cross- sentence constraint loss. These training objectives are carefully designed for these relaxed assumptions. 
Extensive ablations have verified the effectiveness of DualMIL.", + "author": "Long Chen; Yulei Niu; Brian Chen; Xudong Lin; Guangxing Han; Christopher Thomas; Hammad Ayyubi; Heng Ji; Shih-Fu Chang", + "authorids": "/l/long-chen/; /y/yulei-niu/; /b/brian-chen/; /x/xudong-lin/; /g/guangxing-han/; /c/christopher-thomas/; /h/hammad-ayyubi/; /h/heng-ji/; /s/shih-fu-chang/", + "bibtex": "@inproceedings{chen-etal-2022-weakly,\n title = \"Weakly-Supervised Temporal Article Grounding\",\n author = \"Chen, Long and\n Niu, Yulei and\n Chen, Brian and\n Lin, Xudong and\n Han, Guangxing and\n Thomas, Christopher and\n Ayyubi, Hammad and\n Ji, Heng and\n Chang, Shih-Fu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.639/\",\n doi = \"10.18653/v1/2022.emnlp-main.639\",\n pages = \"9402--9413\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.639.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.639/", + "pdf_size": 4281744, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2728379297373988216&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 5, + "aff": "Columbia University; Columbia University; Columbia University; Columbia University; Columbia University; Virginia Tech; Columbia University; University of Illinois at Urbana-Champaign; Columbia University", + "aff_domain": "columbia.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu;cs.vt.edu;columbia.edu;illinois.edu;columbia.edu", + "email": "columbia.edu;columbia.edu;columbia.edu;columbia.edu;columbia.edu;cs.vt.edu;columbia.edu;illinois.edu;columbia.edu", + "github": "https://github.com/zjuchenlong/WSAG", + "project": "", + "author_num": 9, + 
"aff_unique_index": "0;0;0;0;0;1;0;2;0", + "aff_unique_norm": "Columbia University;Virginia Tech;University of Illinois at Urbana-Champaign", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.columbia.edu;https://www.vt.edu;https://illinois.edu", + "aff_unique_abbr": "Columbia;VT;UIUC", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Urbana-Champaign", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.523", + "title": "Weight Perturbation as Defense against Adversarial Word Substitutions", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The existence and pervasiveness of textual adversarial examples have raised serious concerns to security-critical applications. Many methods have been developed to defend against adversarial attacks for neural natural language processing (NLP) models.Adversarial training is one of the most successful defense methods by adding some random or intentional perturbations to the original input texts and making the models robust to the perturbed examples.In this study, we explore the feasibility of improving the adversarial robustness of NLP models by performing perturbations in the parameter space rather than the input feature space.The weight perturbation helps to find a better solution (i.e., the values of weights) that minimizes the adversarial loss among other feasible solutions.We found that the weight perturbation can significantly improve the robustness of NLP models when it is combined with the perturbation in the input embedding space, yielding the highest accuracy on both clean and adversarial examples across different datasets.", + "author": "Jianhan Xu; Linyang Li; Jiping Zhang; Xiaoqing Zheng; Kai-Wei Chang; Cho-Jui Hsieh; Xuanjing Huang", + "authorids": "/j/jianhan-xu/; /l/linyang-li/; /j/jiping-zhang/; /x/xiaoqing-zheng/; /k/kai-wei-chang/; /c/cho-jui-hsieh/; /x/xuan-jing-huang/", + "bibtex": 
"@inproceedings{xu-etal-2022-weight,\n title = \"Weight Perturbation as Defense against Adversarial Word Substitutions\",\n author = \"Xu, Jianhan and\n Li, Linyang and\n Zhang, Jiping and\n Zheng, Xiaoqing and\n Chang, Kai-Wei and\n Hsieh, Cho-Jui and\n Huang, Xuanjing\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.523/\",\n doi = \"10.18653/v1/2022.findings-emnlp.523\",\n pages = \"7054--7063\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.523.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.523/", + "pdf_size": 1786014, + "gs_citation": 4, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5369190133813022237&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing; Department of Computer Science, University of California, Los Angeles, USA; Department of Computer Science, University of California, Los Angeles, USA; School of Computer Science, Fudan University, Shanghai, China+Shanghai Key Laboratory of Intelligent Information Processing", + "aff_domain": "fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;cs.ucla.edu;cs.ucla.edu;fudan.edu.cn", + "email": 
"fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;fudan.edu.cn;cs.ucla.edu;cs.ucla.edu;fudan.edu.cn", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+1;0+1;0+1;2;2;0+1", + "aff_unique_norm": "Fudan University;Shanghai Key Laboratory of Intelligent Information Processing;University of California, Los Angeles", + "aff_unique_dep": "School of Computer Science;Intelligent Information Processing;Department of Computer Science", + "aff_unique_url": "https://www.fudan.edu.cn;;https://www.ucla.edu", + "aff_unique_abbr": "Fudan;;UCLA", + "aff_campus_unique_index": "0;0;0;0;2;2;0", + "aff_campus_unique": "Shanghai;;Los Angeles", + "aff_country_unique_index": "0+0;0+0;0+0;0+0;1;1;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.findings-emnlp.317", + "title": "What Do Compressed Multilingual Machine Translation Models Forget?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Recently, very large pre-trained models achieve state-of-the-art results in various natural language processing (NLP) tasks, but their size makes it more challenging to apply them in resource-constrained environments. Compression techniques allow to drastically reduce the size of the models and therefore their inference time with negligible impact on top-tier metrics. However, the general performance averaged across multiple tasks and/or languages may hide a drastic performance drop on under-represented features, which could result in the amplification of biases encoded by the models. In this work, we assess the impact of compression methods on Multilingual Neural Machine Translation models (MNMT) for various language groups, gender, and semantic biases by extensive analysis of compressed models on different machine translation benchmarks, i.e. FLORES-101, MT-Gender, and DiBiMT. We show that the performance of under-represented languages drops significantly, while the average BLEU metric only slightly decreases. 
Interestingly, the removal of noisy memorization with compression leads to a significant improvement for some medium-resource languages. Finally, we demonstrate that compression amplifies intrinsic gender and semantic biases, even in high-resource languages.", + "author": "Alireza Mohammadshahi; Vassilina Nikoulina; Alexandre Berard; Caroline Brun; James Henderson; Laurent Besacier", + "authorids": "/a/alireza-mohammadshahi/; /v/vassilina-nikoulina/; /a/alexandre-berard/; /c/caroline-brun/; /j/james-henderson/; /l/laurent-besacier/", + "bibtex": "@inproceedings{mohammadshahi-etal-2022-compressed,\n title = \"What Do Compressed Multilingual Machine Translation Models Forget?\",\n author = \"Mohammadshahi, Alireza and\n Nikoulina, Vassilina and\n Berard, Alexandre and\n Brun, Caroline and\n Henderson, James and\n Besacier, Laurent\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.317/\",\n doi = \"10.18653/v1/2022.findings-emnlp.317\",\n pages = \"4308--4329\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.317.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.317/", + "pdf_size": 3295786, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=206526828136010320&as_sdt=5,31&sciodt=0,31&hl=en", + "gs_version_total": 5, + "aff": "NA VER LABS Europe+IDIAP Research Institute+EPFL; NA VER LABS Europe; NA VER LABS Europe; NA VER LABS Europe; IDIAP Research Institute; NA VER LABS Europe", + "aff_domain": "naverlabs.com;naverlabs.com;naverlabs.com;naverlabs.com;idiap.ch;naverlabs.com", + "email": "naverlabs.com;naverlabs.com;naverlabs.com;naverlabs.com;idiap.ch;naverlabs.com", + "github": 
"https://github.com/alirezamshi/bias-compressedMT", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1+2;0;0;0;1;0", + "aff_unique_norm": "NAVER LABS Europe;IDIAP Research Institute;Ecole Polytechnique F\u00e9d\u00e9rale de Lausanne", + "aff_unique_dep": ";;", + "aff_unique_url": "https://www.naverlabs.com/europe;https://www.idiap.ch;https://www.epfl.ch", + "aff_unique_abbr": "NAVER LABS Europe;;EPFL", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0+1+1;0;0;0;1;0", + "aff_country_unique": "Europe;Switzerland" + }, + { + "id": "2022.findings-emnlp.102", + "title": "What Has Been Enhanced in my Knowledge-Enhanced Language Model?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "A number of knowledge integration (KI) methods have recently been proposed to incorporate external knowledge into pretrained language models (LMs). Even though knowledge-enhanced LMs (KELMs) outperform base LMs on knowledge-intensive tasks, the inner-workings of these KI methods are not well-understood. For instance, it is unclear which knowledge is effectively integrated into KELMs and which is not; and if such integration led to catastrophic forgetting of already learned knowledge. We show that existing model interpretation methods such as linear probes and prompts have some key limitations in answering these questions. Then, we revisit KI from an information-theoretic view and propose a new theoretically sound probe model called Graph Convolution Simulator (GCS) for KI interpretation. GCS is eventually quite simple \u2013 it uses graph attention on the corresponding knowledge graph for interpretation.We conduct various experiments to verify that GCS provides reasonable interpretation results for two well-known KELMs: ERNIE and K-Adapter. 
Our experiments reveal that only little knowledge is successfully integrated in these models, and simply increasing the size of the KI corpus may not lead to better KELMs.", + "author": "Yifan Hou; Guoji Fu; Mrinmaya Sachan", + "authorids": "/y/yifan-hou/; /g/guoji-fu/; /m/mrinmaya-sachan/", + "bibtex": "@inproceedings{hou-etal-2022-enhanced,\n title = \"What Has Been Enhanced in my Knowledge-Enhanced Language Model?\",\n author = \"Hou, Yifan and\n Fu, Guoji and\n Sachan, Mrinmaya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.102/\",\n doi = \"10.18653/v1/2022.findings-emnlp.102\",\n pages = \"1417--1438\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.102.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.102/", + "pdf_size": 1011633, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16383901055526785448&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 4, + "aff": "ETH Z\u00fcrich; Southern University of Science and Technology; ETH Z\u00fcrich", + "aff_domain": "inf.ethz.ch;mail.sustech.edu.cn;inf.ethz.ch", + "email": "inf.ethz.ch;mail.sustech.edu.cn;inf.ethz.ch", + "github": "https://github.com/yifan-h/GCS_KI", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "ETH Z\u00fcrich;Southern University of Science and Technology", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.ethz.ch;https://www.sustech.edu.cn", + "aff_unique_abbr": "ETHZ;SUSTech", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "Switzerland;China" + }, + { + "id": "2022.findings-emnlp.54", + "title": 
"What Language Model to Train if You Have One Million GPU Hours?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The crystallization of modeling methods around the Transformer architecture has been a boon for practitioners. Simple, well-motivated architectural variations can transfer across tasks and scale, increasing the impact of modeling research. However, with the emergence of state-of-the-art 100B+ parameters models, large language models are increasingly expensive to accurately design and train. Notably, it can be difficult to evaluate how modeling decisions may impact emergent capabilities, given that these capabilities arise mainly from sheer scale alone.In the process of building BLOOM\u2013the Big Science Large Open-science Open-access Multilingual language model\u2013our goal is to identify an architecture and training setup that makes the best use of our 1,000,000 A100-GPU-hours budget.Specifically, we perform an ablation study at the billion-parameter scale comparing different modeling practices and their impact on zero-shot generalization.In addition, we study the impact of various popular pre-training corpora on zero-shot generalization. We also study the performance of a multilingual model and how it compares to the English-only one. Finally, we consider the scaling behaviour of Transformers to choose the target model size, shape, and training setup. 
All our models and code are open-sourced at https://huggingface.co/bigscience.", + "author": "Teven Le Scao; Thomas Wang; Daniel Hesslow; Stas Bekman; M Saiful Bari; Stella Biderman; Hady Elsahar; Niklas Muennighoff; Jason Phang; Ofir Press; Colin Raffel; Victor Sanh; Sheng Shen; Lintang Sutawika; Jaesung Tae; Zheng Xin Yong; Julien Launay; Iz Beltagy", + "authorids": "/t/teven-le-scao/; /t/thomas-wang/; /d/daniel-hesslow/; /s/stas-bekman/; /m/m-saiful-bari/; /s/stella-biderman/; /h/hady-elsahar/; /n/niklas-muennighoff/; /j/jason-phang/; /o/ofir-press/; /c/colin-raffel/; /v/victor-sanh/; /s/sheng-shen/; /l/lintang-sutawika/; /j/jaesung-tae/; /z/zheng-xin-yong/; /j/julien-launay/; /i/iz-beltagy/", + "bibtex": "@inproceedings{le-scao-etal-2022-language,\n title = \"What Language Model to Train if You Have One Million {GPU} Hours?\",\n author = \"Le Scao, Teven and\n Wang, Thomas and\n Hesslow, Daniel and\n Bekman, Stas and\n Bari, M Saiful and\n Biderman, Stella and\n Elsahar, Hady and\n Muennighoff, Niklas and\n Phang, Jason and\n Press, Ofir and\n Raffel, Colin and\n Sanh, Victor and\n Shen, Sheng and\n Sutawika, Lintang and\n Tae, Jaesung and\n Yong, Zheng Xin and\n Launay, Julien and\n Beltagy, Iz\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.54/\",\n doi = \"10.18653/v1/2022.findings-emnlp.54\",\n pages = \"765--782\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.54.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.54/", + "pdf_size": 568586, + "gs_citation": 124, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8955525358284155758&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": 
";;;;;;;;;;;;;;;;;", + "aff_domain": ";;;;;;;;;;;;;;;;;", + "email": ";;;;;;;;;;;;;;;;;", + "github": "", + "project": "https://huggingface.co/bigscience", + "author_num": 18 + }, + { + "id": "2022.emnlp-main.27", + "title": "What Makes Instruction Learning Hard? An Investigation and a New Challenge in a Synthetic Environment", + "track": "main", + "status": "Main", + "award": false, + "abstract": "The instruction learning paradigm\u2014where a model learns to perform new tasks from task descriptions alone\u2014has become popular in research on general-purpose models. The capabilities of large transformer models as instruction learners, however, remain poorly understood. We use a controlled synthetic environment to characterize such capabilities. Specifically, we use the task of deciding whether a given string matches a regular expression (viewed as an instruction) to identify properties of tasks, instructions, and instances that make instruction learning challenging. For instance, we find that our model, a fine-tuned T5-based text2text transformer, struggles with large regular languages, suggesting that less precise instructions are challenging for models. Instruction executions that require tracking longer contexts of prior steps are also difficult. We use our findings to systematically construct a challenging instruction learning dataset, which we call Hard RegSet. Fine-tuning on Hard RegSet, our large transformer learns to correctly interpret (with at least 90% accuracy) only 65.6% of test instructions, and 11%-24% of the instructions in out-of-distribution generalization settings. 
We thus propose Hard RegSet as a challenging instruction learning dataset, and a controlled environment for studying instruction learning.", + "author": "Matthew Finlayson; Kyle Richardson; Ashish Sabharwal; Peter Clark", + "authorids": "/m/matthew-finlayson/; /k/kyle-richardson/; /a/ashish-sabharwal/; /p/peter-clark/", + "bibtex": "@inproceedings{finlayson-etal-2022-makes,\n title = \"What Makes Instruction Learning Hard? An Investigation and a New Challenge in a Synthetic Environment\",\n author = \"Finlayson, Matthew and\n Richardson, Kyle and\n Sabharwal, Ashish and\n Clark, Peter\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.27/\",\n doi = \"10.18653/v1/2022.emnlp-main.27\",\n pages = \"414--426\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.27.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.27/", + "pdf_size": 345231, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16022944073108452768&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA; Allen Institute for AI, Seattle, WA", + "aff_domain": "allenai.org;allenai.org;allenai.org;allenai.org", + "email": "allenai.org;allenai.org;allenai.org;allenai.org", + "github": "https://github.com/allenai/RegSet", + "project": "", + "author_num": 4, + "aff_unique_index": "0;0;0;0", + "aff_unique_norm": "Allen Institute for AI", + "aff_unique_dep": "", + "aff_unique_url": "https://allenai.org", + "aff_unique_abbr": "AI2", + "aff_campus_unique_index": "0;0;0;0", + "aff_campus_unique": "Seattle", + 
"aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.516", + "title": "What do Large Language Models Learn beyond Language?", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Large language models (LMs) have rapidly become a mainstay in Natural Language Processing. These models are known to acquire rich linguistic knowledge from training on large amounts of text. In this paper, we investigate if pre-training on text also confers these models with helpful \u2018inductive biases\u2019 for non-linguistic reasoning. On a set of 19 diverse non-linguistic tasks involving quantitative computations, recognizing regular expressions and reasoning over strings. We find that pretrained models significantly outperform comparable non-pretrained neural models. This remains true also in experiments with training non-pretrained models with fewer parameters to account for model regularization effects. We further explore the effect of text domain on LMs by pretraining models from text from different domains and provenances. Our experiments surprisingly reveal that the positive effects of pre-training persist even when pretraining on multi-lingual text or computer code, and even for text generated from synthetic languages. 
Our findings suggest a hithertho unexplored deep connection between pre-training and inductive learning abilities of language models", + "author": "Avinash Madasu; Shashank Srivastava", + "authorids": "/a/avinash-madasu/; /s/shashank-srivastava/", + "bibtex": "@inproceedings{madasu-srivastava-2022-large,\n title = \"What do Large Language Models Learn beyond Language?\",\n author = \"Madasu, Avinash and\n Srivastava, Shashank\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.516/\",\n doi = \"10.18653/v1/2022.findings-emnlp.516\",\n pages = \"6940--6953\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.516.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.516/", + "pdf_size": 904775, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14910873983937580040&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "UNC Chapel Hill; UNC Chapel Hill", + "aff_domain": "cs.unc.edu;cs.unc.edu", + "email": "cs.unc.edu;cs.unc.edu", + "github": "https://github.com/avinashsai/NILM", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "University of North Carolina at Chapel Hill", + "aff_unique_dep": "", + "aff_unique_url": "https://www.unc.edu", + "aff_unique_abbr": "UNC", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Chapel Hill", + "aff_country_unique_index": "0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.41", + "title": "When Can Transformers Ground and Compose: Insights from Compositional Generalization Benchmarks", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Humans can reason compositionally 
whilst grounding language utterances to the real world. Recent benchmarks like ReaSCAN (Wu et al., 2021) use navigation tasks grounded in a grid world to assess whether neural models exhibit similar capabilities. In this work, we present a simple transformer-based model that outperforms specialized architectures on ReaSCAN and a modified version (Qiu et al., 2021) of gSCAN (Ruis et al., 2020). On analyzing the task, we find that identifying the target location in the grid world is the main challenge for the models. Furthermore, we show that a particular split in ReaSCAN, which tests depth generalization, is unfair. On an amended version of this split, we show that transformers can generalize to deeper input structures. Finally, we design a simpler grounded compositional generalization task, RefEx, to investigate how transformers reason compositionally. We show that a single self-attention layer with a single head generalizes to novel combinations of object attributes. Moreover, we derive a precise mathematical construction of the transformer\u2019s computations from the learned network. 
Overall, we provide valuable insights about the grounded compositional generalization task and the behaviour of transformers on it, which would be useful for researchers working in this area.", + "author": "Ankur Sikarwar; Arkil Patel; Navin Goyal", + "authorids": "/a/ankur-sikarwar/; /a/arkil-patel/; /n/navin-goyal/", + "bibtex": "@inproceedings{sikarwar-etal-2022-transformers,\n title = \"When Can Transformers Ground and Compose: Insights from Compositional Generalization Benchmarks\",\n author = \"Sikarwar, Ankur and\n Patel, Arkil and\n Goyal, Navin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.41/\",\n doi = \"10.18653/v1/2022.emnlp-main.41\",\n pages = \"648--669\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.41.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.41/", + "pdf_size": 3471887, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3719125502945368653&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "I2R, A*STAR Singapore; Mila - Quebec AI Institute+McGill University; Microsoft Research India", + "aff_domain": "gmail.com;gmail.com;microsoft.com", + "email": "gmail.com;gmail.com;microsoft.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1+2;3", + "aff_unique_norm": "A*STAR Institute of High Performance Computing;Quebec AI Institute;McGill University;Microsoft Research", + "aff_unique_dep": "Institute of High Performance Computing;AI Institute;;Microsoft Research India", + "aff_unique_url": 
"https://www.a-star.edu.sg;https://mila.quebec;https://www.mcgill.ca;https://www.microsoft.com/en-us/research/group/microsoft-research-india", + "aff_unique_abbr": "I2R;Mila;McGill;MSR India", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1+1;2", + "aff_country_unique": "Singapore;Canada;India" + }, + { + "id": "2022.emnlp-main.148", + "title": "When FLUE Meets FLANG: Benchmarks and Large Pretrained Language Model for Financial Domain", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-trained language models have shown impressive performance on a variety of tasks and domains. Previous research on financial language models usually employs a generic training scheme to train standard model architectures, without completely leveraging the richness of the financial data. We propose a novel domain specific Financial LANGuage model (FLANG) which uses financial keywords and phrases for better masking, together with span boundary objective and in-filing objective. Additionally, the evaluation benchmarks in the field have been limited. To this end, we contribute the Financial Language Understanding Evaluation (FLUE), an open-source comprehensive suite of benchmarks for the financial domain. These include new benchmarks across 5 NLP tasks in financial domain as well as common benchmarks used in the previous research. Experiments on these benchmarks suggest that our model outperforms those in prior literature on a variety of NLP tasks. 
Our models, code and benchmark data will be made publicly available on Github and Huggingface.", + "author": "Raj Shah; Kunal Chawla; Dheeraj Eidnani; Agam Shah; Wendi Du; Sudheer Chava; Natraj Raman; Charese Smiley; Jiaao Chen; Diyi Yang", + "authorids": "/r/raj-shah/; /k/kunal-chawla/; /d/dheeraj-eidnani/; /a/agam-shah/; /w/wendi-du/; /s/sudheer-chava/; /n/natraj-raman/; /c/charese-smiley/; /j/jiaao-chen/; /d/diyi-yang/", + "bibtex": "@inproceedings{shah-etal-2022-flue,\n title = \"When {FLUE} Meets {FLANG}: Benchmarks and Large Pretrained Language Model for Financial Domain\",\n author = \"Shah, Raj and\n Chawla, Kunal and\n Eidnani, Dheeraj and\n Shah, Agam and\n Du, Wendi and\n Chava, Sudheer and\n Raman, Natraj and\n Smiley, Charese and\n Chen, Jiaao and\n Yang, Diyi\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.148/\",\n doi = \"10.18653/v1/2022.emnlp-main.148\",\n pages = \"2322--2335\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.148.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.148/", + "pdf_size": 301671, + "gs_citation": 132, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17345696991854859641&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; Georgia Institute of Technology; JPMorgan AI Research; JPMorgan AI Research; Georgia Institute of Technology; Stanford University", + "aff_domain": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;jpmorgan.com;jpmchase.com;gatech.edu;cs.stanford.edu", + 
"email": "gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;gatech.edu;jpmorgan.com;jpmchase.com;gatech.edu;cs.stanford.edu", + "github": "https://salt-nlp.github.io/FLANG/", + "project": "", + "author_num": 10, + "aff_unique_index": "0;0;0;0;0;0;1;1;0;2", + "aff_unique_norm": "Georgia Institute of Technology;JPMorgan Chase & Co.;Stanford University", + "aff_unique_dep": ";JPMorgan AI Research;", + "aff_unique_url": "https://www.gatech.edu;https://www.jpmorganchase.com;https://www.stanford.edu", + "aff_unique_abbr": "Georgia Tech;JPM;Stanford", + "aff_campus_unique_index": "1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.21", + "title": "When Language Model Meets Private Library", + "track": "main", + "status": "finding", + "award": false, + "abstract": "With the rapid development of pre-training techniques, a number of language models have been pre-trained on large-scale code corpora and perform well in code generation. In this paper, we investigate how to equip pre-trained language models with the ability of code generation for private libraries. In practice, it is common for programmers to write code using private libraries. However, this is a challenge for language models since they have never seen private APIs during training. Motivated by the fact that private libraries usually come with elaborate API documentation, we propose a novel framework with two modules: the APIRetriever finds useful APIs, and then the APICoder generates code using these APIs. For APIRetriever, we present a dense retrieval system and also design a friendly interaction to involve uses. For APICoder, we can directly use off-the-shelf language models, or continually pre-train the base model on a code corpus containing API information. Both modules are trained with data from public libraries and can be generalized to private ones. 
Furthermore, we craft three benchmarks for private libraries, named TorchDataEval, MonkeyEval, and BeatNumEval. Experimental results demonstrate the impressive performance of our framework.", + "author": "Daoguang Zan; Bei Chen; Zeqi Lin; Bei Guan; Wang Yongji; Jian-Guang Lou", + "authorids": "/d/daoguang-zan/; /b/bei-chen/; /z/zeqi-lin/; /b/bei-guan/; /w/wang-yongji/; /j/jian-guang-lou/", + "bibtex": "@inproceedings{zan-etal-2022-language,\n title = \"When Language Model Meets Private Library\",\n author = \"Zan, Daoguang and\n Chen, Bei and\n Lin, Zeqi and\n Guan, Bei and\n Yongji, Wang and\n Lou, Jian-Guang\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.21/\",\n doi = \"10.18653/v1/2022.findings-emnlp.21\",\n pages = \"277--288\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.21.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.21/", + "pdf_size": 638452, + "gs_citation": 73, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11110019372551564821&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Cooperative Innovation Center, Institute of Software, Chinese Academy of Sciences+University of Chinese Academy of Sciences; Microsoft Research Asia; Microsoft Research Asia; University of Chinese Academy of Sciences+Integrative Innovation Center, Institute of Software, Chinese Academy of Sciences+State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences; University of Chinese Academy of Sciences+Integrative Innovation Center, Institute of Software, Chinese Academy of Sciences+State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences; 
Microsoft Research Asia", + "aff_domain": "iscas.ac.cn;microsoft.com;microsoft.com;iscas.ac.cn;itechs.iscas.ac.cn;microsoft.com", + "email": "iscas.ac.cn;microsoft.com;microsoft.com;iscas.ac.cn;itechs.iscas.ac.cn;microsoft.com", + "github": "https://github.com/microsoft/PyCodeGPT/tree/main/apicoder", + "project": "", + "author_num": 6, + "aff_unique_index": "0+1;2;2;1+0+0;1+0+0;2", + "aff_unique_norm": "Chinese Academy of Sciences;University of Chinese Academy of Sciences;Microsoft Research", + "aff_unique_dep": "Institute of Software;;Research", + "aff_unique_url": "http://www.cas.cn;http://www.ucas.ac.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "CAS;UCAS;MSR Asia", + "aff_campus_unique_index": ";1;1;;;1", + "aff_campus_unique": ";Asia", + "aff_country_unique_index": "0+0;0;0;0+0+0;0+0+0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.789", + "title": "When More Data Hurts: A Troubling Quirk in Developing Broad-Coverage Natural Language Understanding Systems", + "track": "main", + "status": "Main", + "award": false, + "abstract": "In natural language understanding (NLU) production systems, users\u2019 evolving needs necessitate the addition of new features over time, indexed by new symbols added to the meaning representation space. This requires additional training data and results in ever-growing datasets. We present the first systematic investigation into this incremental symbol learning scenario. Our analysis reveals a troubling quirk in building broad-coverage NLU systems: as the training dataset grows, performance on a small set of new symbols often decreases. We show that this trend holds for multiple mainstream models on two common NLU tasks: intent recognition and semantic parsing. 
Rejecting class imbalance as the sole culprit, we reveal that the trend is closely associated with an effect we call source signal dilution, where strong lexical cues for the new symbol become diluted as the training dataset grows. Selectively dropping training examples to prevent dilution often reverses the trend, showing the over-reliance of mainstream neural NLU models on simple lexical cues.", + "author": "Elias Stengel-Eskin; Emmanouil Antonios Platanios; Adam Pauls; Sam Thomson; Hao Fang; Benjamin Van Durme; Jason Eisner; Yu Su", + "authorids": "/e/elias-stengel-eskin/; /e/emmanouil-antonios-platanios/; /a/adam-pauls/; /s/sam-thomson/; /h/hao-fang/; /b/benjamin-van-durme/; /j/jason-eisner/; /y/yu-su/", + "bibtex": "@inproceedings{stengel-eskin-etal-2022-data,\n title = \"When More Data Hurts: A Troubling Quirk in Developing Broad-Coverage Natural Language Understanding Systems\",\n author = \"Stengel-Eskin, Elias and\n Platanios, Emmanouil Antonios and\n Pauls, Adam and\n Thomson, Sam and\n Fang, Hao and\n Van Durme, Benjamin and\n Eisner, Jason and\n Su, Yu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.789/\",\n doi = \"10.18653/v1/2022.emnlp-main.789\",\n pages = \"11473--11487\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.789.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.789/", + "pdf_size": 1212112, + "gs_citation": 3, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13855482293354530530&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 6, + "aff": "Johns Hopkins University; Microsoft Semantic Machines; Microsoft Semantic Machines; Microsoft Semantic Machines; Microsoft Semantic Machines; 
Microsoft Semantic Machines; Microsoft Semantic Machines; Microsoft Semantic Machines", + "aff_domain": "; ; ; ; ; ; ; ", + "email": "; ; ; ; ; ; ; ", + "github": "", + "project": "https://aka.ms/nlu-incremental-symbol-learning", + "author_num": 8, + "aff_unique_index": "0;1;1;1;1;1;1;1", + "aff_unique_norm": "Johns Hopkins University;Microsoft", + "aff_unique_dep": ";Semantic Machines", + "aff_unique_url": "https://www.jhu.edu;https://www.microsoft.com", + "aff_unique_abbr": "JHU;Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.540", + "title": "When does Parameter-Efficient Transfer Learning Work for Machine Translation?", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Parameter-efficient fine-tuning methods (PEFTs) offer the promise of adapting large pre-trained models while only tuning a small number of parameters. They have been shown to be competitive with full model fine-tuning for many downstream tasks. However, prior work indicates that PEFTs may not work as well for machine translation (MT), and there is no comprehensive study showing when PEFTs work for MT. We conduct a comprehensive empirical study of PEFTs for MT, considering (1) various parameter budgets, (2) a diverse set of language-pairs, and (3) different pre-trained models. We find that \u2018adapters\u2019, in which small feed-forward networks are added after every layer, are indeed on par with full model fine-tuning when the parameter budget corresponds to 10% of total model parameters. Nevertheless, as the number of tuned parameters decreases, the performance of PEFTs decreases. The magnitude of this decrease depends on the language pair, with PEFTs particularly struggling for distantly related language-pairs. 
We find that using PEFTs with a larger pre-trained model outperforms full fine-tuning with a smaller model, and for smaller training data sizes, PEFTs outperform full fine-tuning for the same pre-trained model.", + "author": "Ahmet \u00dcst\u00fcn; Asa Cooper Stickland", + "authorids": "/a/ahmet-ustun/; /a/asa-cooper-stickland/", + "bibtex": "@inproceedings{ustun-cooper-stickland-2022-parameter,\n title = \"When does Parameter-Efficient Transfer Learning Work for Machine Translation?\",\n author = {{\\\"U}st{\\\"u}n, Ahmet and\n Cooper Stickland, Asa},\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.540/\",\n doi = \"10.18653/v1/2022.emnlp-main.540\",\n pages = \"7919--7933\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.540.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.540/", + "pdf_size": 543114, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3120002226773665649&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 6, + "aff": "University of Groningen; University of Edinburgh", + "aff_domain": "rug.nl;ed.ac.uk", + "email": "rug.nl;ed.ac.uk", + "github": "https://github.com/ahmetustun/fairseqdata", + "project": "", + "author_num": 2, + "aff_unique_index": "0;1", + "aff_unique_norm": "University of Groningen;University of Edinburgh", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.rug.nl;https://www.ed.ac.uk", + "aff_unique_abbr": "RUG;Edinburgh", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1", + "aff_country_unique": "Netherlands;United Kingdom" + }, + { + "id": "2022.emnlp-main.165", + "title": "Whose Language Counts as High 
Quality? Measuring Language Ideologies in Text Data Selection", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Language models increasingly rely on massive web crawls for diverse text data. However, these sources are rife with undesirable content. As such, resources like Wikipedia, books, and news often serve as anchors for automatically selecting web text most suitable for language modeling, a process typically referred to as quality filtering. Using a new dataset of U.S. high school newspaper articles\u2014written by students from across the country\u2014we investigate whose language is preferred by the quality filter used for GPT-3. We find that newspapers from larger schools, located in wealthier, educated, and urban zones (ZIP codes) are more likely to be classified as high quality. We also show that this quality measurement is unaligned with other sensible metrics, such as factuality or literary acclaim. We argue that privileging any corpus as high quality entails a language ideology, and more care is needed to construct training corpora for language models, with better transparency and justification for the inclusion or exclusion of various texts.", + "author": "Suchin Gururangan; Dallas Card; Sarah Dreier; Emily Gade; Leroy Wang; Zeyu Wang; Luke Zettlemoyer; Noah A. Smith", + "authorids": "/s/suchin-gururangan/; /d/dallas-card/; /s/sarah-dreier/; /e/emily-gade/; /l/leroy-wang/; /z/zeyu-wang/; /l/luke-zettlemoyer/; /n/noah-a-smith/", + "bibtex": "@inproceedings{gururangan-etal-2022-whose,\n title = \"Whose Language Counts as High Quality? 
Measuring Language Ideologies in Text Data Selection\",\n author = \"Gururangan, Suchin and\n Card, Dallas and\n Dreier, Sarah and\n Gade, Emily and\n Wang, Leroy and\n Wang, Zeyu and\n Zettlemoyer, Luke and\n Smith, Noah A.\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.165/\",\n doi = \"10.18653/v1/2022.emnlp-main.165\",\n pages = \"2562--2580\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.165.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.165/", + "pdf_size": 509857, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2458375305459291190&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Washington; University of Michigan; University of New Mexico; Emory University; University of Washington + Allen Institute for AI; University of Washington + Allen Institute for AI; University of Washington + Allen Institute for AI; University of Washington + Allen Institute for AI", + "aff_domain": "cs.washington.edu;umich.edu;unm.edu;emory.edu;uw.edu;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "email": "cs.washington.edu;umich.edu;unm.edu;emory.edu;uw.edu;cs.washington.edu;cs.washington.edu;cs.washington.edu", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;1;2;3;0+4;0+4;0+4;0+4", + "aff_unique_norm": "University of Washington;University of Michigan;University of New Mexico;Emory University;Allen Institute for AI", + "aff_unique_dep": ";;;;", + "aff_unique_url": "https://www.washington.edu;https://www.umich.edu;https://www.unm.edu;https://www.emory.edu;https://allenai.org", + "aff_unique_abbr": "UW;UM;UNM;Emory;AI2", + 
"aff_campus_unique_index": ";;;", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0+0;0+0;0+0;0+0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.642", + "title": "Why Do You Feel This Way? Summarizing Triggers of Emotions in Social Media Posts", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Crises such as the COVID-19 pandemic continuously threaten our world and emotionally affect billions of people worldwide in distinct ways. Understanding the triggers leading to people\u2019s emotions is of crucial importance. Social media posts can be a good source of such analysis, yet these texts tend to be charged with multiple emotions, with triggers scattering across multiple sentences. This paper takes a novel angle, namely, emotion detection and trigger summarization, aiming to both detect perceived emotions in text, and summarize events and their appraisals that trigger each emotion. To support this goal, we introduce CovidET (Emotions and their Triggers during Covid-19), a dataset of ~1,900 English Reddit posts related to COVID-19, which contains manual annotations of perceived emotions and abstractive summaries of their triggers described in the post. We develop strong baselines to jointly detect emotions and summarize emotion triggers. Our analyses show that CovidET presents new challenges in emotion-specific summarization, as well as multi-emotion detection in long social media posts.", + "author": "Hongli Zhan; Tiberiu Sosea; Cornelia Caragea; Junyi Jessy Li", + "authorids": "/h/hongli-zhan/; /t/tiberiu-sosea/; /c/cornelia-caragea/; /j/junyi-jessy-li/", + "bibtex": "@inproceedings{zhan-etal-2022-feel,\n title = \"Why Do You Feel This Way? 
Summarizing Triggers of Emotions in Social Media Posts\",\n author = \"Zhan, Hongli and\n Sosea, Tiberiu and\n Caragea, Cornelia and\n Li, Junyi Jessy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.642/\",\n doi = \"10.18653/v1/2022.emnlp-main.642\",\n pages = \"9436--9453\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.642.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.642/", + "pdf_size": 3219279, + "gs_citation": 19, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13381668361478188810&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": "Department of Linguistics, The University of Texas at Austin; Department of Computer Science, University of Illinois Chicago; Department of Computer Science, University of Illinois Chicago; Department of Linguistics, The University of Texas at Austin", + "aff_domain": "utexas.edu;uic.edu;uic.edu;utexas.edu", + "email": "utexas.edu;uic.edu;uic.edu;utexas.edu", + "github": "", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;1;0", + "aff_unique_norm": "The University of Texas at Austin;University of Illinois Chicago", + "aff_unique_dep": "Department of Linguistics;Department of Computer Science", + "aff_unique_url": "https://www.utexas.edu;https://www.uic.edu", + "aff_unique_abbr": "UT Austin;UIC", + "aff_campus_unique_index": "0;1;1;0", + "aff_campus_unique": "Austin;Chicago", + "aff_country_unique_index": "0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.771", + "title": "Why Should Adversarial Perturbations be Imperceptible? 
Rethink the Research Paradigm in Adversarial NLP", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Textual adversarial samples play important roles in multiple subfields of NLP research, including security, evaluation, explainability, and data augmentation. However, most work mixes all these roles, obscuring the problem definitions and research goals of the security role that aims to reveal the practical concerns of NLP models. In this paper, we rethink the research paradigm of textual adversarial samples in security scenarios. We discuss the deficiencies in previous work and propose our suggestions that the research on the Security-oriented adversarial NLP (SoadNLP) should: (1) evaluate their methods on security tasks to demonstrate the real-world concerns; (2) consider real-world attackers\u2019 goals, instead of developing impractical methods. To this end, we first collect, process, and release a security datasets collection Advbench. Then, we reformalize the task and adjust the emphasis on different goals in SoadNLP. Next, we propose a simple method based on heuristic rules that can easily fulfill the actual adversarial goals to simulate real-world attack methods. We conduct experiments on both the attack and the defense sides on Advbench. Experimental results show that our method has higher practical value, indicating that the research paradigm in SoadNLP may start from our new benchmark. All the code and data of Advbench can be obtained at https://github.com/thunlp/Advbench.", + "author": "Yangyi Chen; Hongcheng Gao; Ganqu Cui; Fanchao Qi; Longtao Huang; Zhiyuan Liu; Maosong Sun", + "authorids": "/y/yangyi-chen/; /h/hongcheng-gao/; /g/ganqu-cui/; /f/fanchao-qi/; /l/longtao-huang/; /z/zhiyuan-liu/; /m/maosong-sun/", + "bibtex": "@inproceedings{chen-etal-2022-adversarial,\n title = \"Why Should Adversarial Perturbations be Imperceptible? 
Rethink the Research Paradigm in Adversarial {NLP}\",\n author = \"Chen, Yangyi and\n Gao, Hongcheng and\n Cui, Ganqu and\n Qi, Fanchao and\n Huang, Longtao and\n Liu, Zhiyuan and\n Sun, Maosong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.771/\",\n doi = \"10.18653/v1/2022.emnlp-main.771\",\n pages = \"11222--11237\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.771.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.771/", + "pdf_size": 443415, + "gs_citation": 50, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17493985021941749967&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "NLP Group, DCST, IAI, BNRIST, Tsinghua University + University of Illinois Urbana-Champaign; NLP Group, DCST, IAI, BNRIST, Tsinghua University + Chongqing University; NLP Group, DCST, IAI, BNRIST, Tsinghua University; NLP Group, DCST, IAI, BNRIST, Tsinghua University; Alibaba Group; NLP Group, DCST, IAI, BNRIST, Tsinghua University + IICTUS, Shanghai; NLP Group, DCST, IAI, BNRIST, Tsinghua University + IICTUS, Shanghai", + "aff_domain": "illinois.edu;gmail.com; ; ; ; ; ", + "email": "illinois.edu;gmail.com; ; ; ; ; ", + "github": "https://github.com/thunlp/Advbench", + "project": "", + "author_num": 7, + "aff_unique_index": "0+1;0+2;0;0;3;0+4;0+4", + "aff_unique_norm": "Tsinghua University;University of Illinois at Urbana-Champaign;Chongqing University;Alibaba Group;IICTUS", + "aff_unique_dep": "NLP Group;;;;", + "aff_unique_url": "https://www.tsinghua.edu.cn;https://illinois.edu;https://www.cqu.edu.cn;https://www.alibaba.com;", + "aff_unique_abbr": "THU;UIUC;CQU;Alibaba;", + "aff_campus_unique_index": "1;;2;2", 
+ "aff_campus_unique": ";Urbana-Champaign;Shanghai", + "aff_country_unique_index": "0+1;0+0;0;0;0;0+0;0+0", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.143", + "title": "Why is Winoground Hard? Investigating Failures in Visuolinguistic Compositionality", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent visuolinguistic pre-trained models show promising progress on various end tasks such as image retrieval and video captioning. Yet, they fail miserably on the recently proposed Winoground dataset, which challenges models to match paired images and English captions, with items constructed to overlap lexically but differ in meaning (e.g., \u201cthere is a mug in some grass\u201d vs. \u201cthere is some grass in a mug\u201d). By annotating the dataset using new fine-grained tags, we show that solving the Winoground task requires not just compositional language understanding, but a host of other abilities like commonsense reasoning or locating small, out-of-focus objects in low-resolution images. In this paper, we identify the dataset\u2019s main challenges through a suite of experiments on related tasks (probing task, image retrieval task), data augmentation, and manual inspection of the dataset. Our analysis suggests that a main challenge in visuolinguistic models may lie in fusing visual and textual representations, rather than in compositional language understanding. We release our annotation and code at https://github.com/ajd12342/why-winoground-hard.", + "author": "Anuj Diwan; Layne Berry; Eunsol Choi; David Harwath; Kyle Mahowald", + "authorids": "/a/anuj-diwan/; /l/layne-berry/; /e/eunsol-choi/; /d/david-harwath/; /k/kyle-mahowald/", + "bibtex": "@inproceedings{diwan-etal-2022-winoground,\n title = \"Why is Winoground Hard? 
Investigating Failures in Visuolinguistic Compositionality\",\n author = \"Diwan, Anuj and\n Berry, Layne and\n Choi, Eunsol and\n Harwath, David and\n Mahowald, Kyle\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.143/\",\n doi = \"10.18653/v1/2022.emnlp-main.143\",\n pages = \"2236--2250\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.143.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.143/", + "pdf_size": 6259275, + "gs_citation": 57, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12709715119898586679&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 6, + "aff": "Department of Computer Science; Department of Computer Science; Department of Computer Science; Department of Computer Science; Department of Linguistics", + "aff_domain": "utexas.edu;utexas.edu;utexas.edu;utexas.edu;utexas.edu", + "email": "utexas.edu;utexas.edu;utexas.edu;utexas.edu;utexas.edu", + "github": "https://github.com/ajd12342/why-winoground-hard", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;1", + "aff_unique_norm": "Unknown Institution;University Affiliation Not Specified", + "aff_unique_dep": "Department of Computer Science;Department of Linguistics", + "aff_unique_url": ";", + "aff_unique_abbr": ";", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "", + "aff_country_unique": "" + }, + { + "id": "2022.emnlp-main.345", + "title": "Wider & Closer: Mixture of Short-channel Distillers for Zero-shot Cross-lingual Named Entity Recognition", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Zero-shot cross-lingual named entity recognition (NER) 
aims at transferring knowledge from annotated and rich-resource data in source languages to unlabeled and lean-resource data in target languages. Existing mainstream methods based on the teacher-student distillation framework ignore the rich and complementary information lying in the intermediate layers of pre-trained language models, and domain-invariant information is easily lost during transfer. In this study, a mixture of short-channel distillers (MSD) method is proposed to fully interact the rich hierarchical information in the teacher model and to transfer knowledge to the student model sufficiently and efficiently. Concretely, a multi-channel distillation framework is designed for sufficient information transfer by aggregating multiple distillers as a mixture. Besides, an unsupervised method adopting parallel domain adaptation is proposed to shorten the channels between the teacher and student models to preserve domain-invariant features. Experiments on four datasets across nine languages demonstrate that the proposed method achieves new state-of-the-art performance on zero-shot cross-lingual NER and shows great generalization and compatibility across languages and fields.", + "author": "Jun-Yu Ma; Beiduo Chen; Jia-Chen Gu; Zhenhua Ling; Wu Guo; Quan Liu; Zhigang Chen; Cong Liu", + "authorids": "/j/jun-yu-ma/; /b/beiduo-chen/; /j/jia-chen-gu/; /z/zhenhua-ling/; /w/wu-guo/; /q/quan-liu/; /z/zhigang-chen/; /c/cong-liu-iflytek/", + "bibtex": "@inproceedings{ma-etal-2022-wider,\n title = \"Wider {\\&} Closer: Mixture of Short-channel Distillers for Zero-shot Cross-lingual Named Entity Recognition\",\n author = \"Ma, Jun-Yu and\n Chen, Beiduo and\n Gu, Jia-Chen and\n Ling, Zhenhua and\n Guo, Wu and\n Liu, Quan and\n Chen, Zhigang and\n Liu, Cong\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n 
address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.345/\",\n doi = \"10.18653/v1/2022.emnlp-main.345\",\n pages = \"5171--5183\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.345.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.345/", + "pdf_size": 832965, + "gs_citation": 11, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14497940980572080265&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 7, + "aff": "National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China; National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China; National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China; National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China; National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China; State Key Laboratory of Cognitive Intelligence+iFLYTEK Research, Hefei, China; Jilin Kexun Information Technology Co., Ltd.; National Engineering Research Center of Speech and Language Information Processing, University of Science and Technology of China, Hefei, China+iFLYTEK Research, Hefei, China", + "aff_domain": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;iflytek.com;iflytek.com;iflytek.com", + "email": "mail.ustc.edu.cn;mail.ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;ustc.edu.cn;iflytek.com;iflytek.com;iflytek.com", + "github": "", + "project": "", + "author_num": 8, + "aff_unique_index": "0;0;0;0;0;1+2;3;0+2", + "aff_unique_norm": "University of Science and Technology of China;State 
Key Laboratory of Cognitive Intelligence;iFLYTEK Research;Jilin Kexun Information Technology Co., Ltd.", + "aff_unique_dep": "National Engineering Research Center of Speech and Language Information Processing;;;", + "aff_unique_url": "http://www.ustc.edu.cn;;https://www.iflytek.com;", + "aff_unique_abbr": "USTC;;iFLYTEK;", + "aff_campus_unique_index": "0;0;0;0;0;0;0+0", + "aff_campus_unique": "Hefei;", + "aff_country_unique_index": "0;0;0;0;0;0+0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.findings-emnlp.65", + "title": "Wish I Can Feel What You Feel: A Neural Approach for Empathetic Response Generation", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Expressing empathy is important in everyday conversations, and exploring how empathy arises is crucial in automatic response generation. Most previous approaches consider only a single factor that affects empathy. However, in practice, empathy generation and expression is a very complex and dynamic psychological process. 
A listener needs to find out events which cause a speaker\u2019s emotions (emotion cause extraction), project the events into some experience (knowledge extension), and express empathy in the most appropriate way (communication mechanism).To this end, we propose a novel approach, which integrates the three components - emotion cause, knowledge graph, and communication mechanism for empathetic response generation.Experimental results on the benchmark dataset demonstrate the effectiveness of our method and show that incorporating the key components generates more informative and empathetic responses.", + "author": "Yangbin Chen; Chunfeng Liang", + "authorids": "/y/yangbin-chen/; /c/chunfeng-liang/", + "bibtex": "@inproceedings{chen-liang-2022-wish,\n title = \"Wish {I} Can Feel What You Feel: A Neural Approach for Empathetic Response Generation\",\n author = \"Chen, Yangbin and\n Liang, Chunfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.65/\",\n doi = \"10.18653/v1/2022.findings-emnlp.65\",\n pages = \"922--933\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.65.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.65/", + "pdf_size": 6341796, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4981859193067407234&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Suzhou Fubian Medical Technology Co., Ltd., China; Suzhou Fubian Medical Technology Co., Ltd., China", + "aff_domain": "gmail.com;gmail.com", + "email": "gmail.com;gmail.com", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "Suzhou Fubian Medical Technology Co., Ltd.", + 
"aff_unique_dep": "", + "aff_unique_url": "", + "aff_unique_abbr": "", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.118", + "title": "Word Order Matters When You Increase Masking", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Word order, an essential property of natural languages, is injected in Transformer-based neural language models using position encoding. However, recent experiments have shown that explicit position encoding is not always useful, since some models without such feature managed to achieve state-of-the art performance on some tasks. To understand better this phenomenon, we examine the effect of removing position encodings on the pre-training objective itself (i.e., masked language modelling), to test whether models can reconstruct position information from co-occurrences alone. We do so by controlling the amount of masked tokens in the input sentence, as a proxy to affect the importance of position information for the task. We find that the necessity of position information increases with the amount of masking, and that masked language models without position encodings are not able to reconstruct this information on the task. 
These findings point towards a direct relationship between the amount of masking and the ability of Transformers to capture order-sensitive aspects of language using position encoding.", + "author": "Karim Lasri; Alessandro Lenci; Thierry Poibeau", + "authorids": "/k/karim-lasri/; /a/alessandro-lenci/; /t/thierry-poibeau/", + "bibtex": "@inproceedings{lasri-etal-2022-word,\n title = \"Word Order Matters When You Increase Masking\",\n author = \"Lasri, Karim and\n Lenci, Alessandro and\n Poibeau, Thierry\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.118/\",\n doi = \"10.18653/v1/2022.emnlp-main.118\",\n pages = \"1808--1815\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.118.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.118/", + "pdf_size": 335964, + "gs_citation": 5, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4044194849518194205&as_sdt=5,47&sciodt=0,47&hl=en", + "gs_version_total": 8, + "aff": "Lattice (\u00c9cole Normale Sup\u00e9rieure-PSL, CNRS, U. Sorbonne Nouvelle); University of Pisa; Lattice (\u00c9cole Normale Sup\u00e9rieure-PSL, CNRS, U. 
Sorbonne Nouvelle)", + "aff_domain": "ens.psl.eu;unipi.it;ens.psl.eu", + "email": "ens.psl.eu;unipi.it;ens.psl.eu", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "\u00c9cole Normale Sup\u00e9rieure-PSL;University of Pisa", + "aff_unique_dep": "Lattice;", + "aff_unique_url": "https://www.ens.psl.eu;https://www.unipi.it", + "aff_unique_abbr": "ENS-PSL;UNIP", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "France;Italy" + }, + { + "id": "2022.findings-emnlp.440", + "title": "WordTies: Measuring Word Associations in Language Models via Constrained Sampling", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Word associations are widely used in psychology to provide insights on how humans perceive and understand concepts. Comparing word associations in language models (LMs) to those generated by human subjects can serve as a proxy to uncover embedded lexical and commonsense knowledge in language models. While much helpful work has been done applying direct metrics, such as cosine similarity, to help understand latent spaces, these metrics are symmetric, while human word associativity is asymmetric. We propose WordTies, an algorithm based on constrained sampling from LMs, which allows an asymmetric measurement of associated words, given a cue word as the input. Comparing to existing methods, word associations found by this method share more overlap with associations provided by humans, and observe the asymmetric property of human associations. 
To examine possible reasons behind associations, we analyze the knowledge and reasoning behind the word pairings as they are linked to lexical and commonsense knowledge graphs.When the knowledge about the nature of the word pairings is combined with a probability that the LM has learned that information, we have a new way to examine what information is captured in LMs.", + "author": "Peiran Yao; Tobias Renwick; Denilson Barbosa", + "authorids": "/p/peiran-yao/; /t/tobias-renwick/; /d/denilson-barbosa/", + "bibtex": "@inproceedings{yao-etal-2022-wordties,\n title = \"{W}ord{T}ies: Measuring Word Associations in Language Models via Constrained Sampling\",\n author = \"Yao, Peiran and\n Renwick, Tobias and\n Barbosa, Denilson\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.440/\",\n doi = \"10.18653/v1/2022.findings-emnlp.440\",\n pages = \"5959--5970\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.440.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.440/", + "pdf_size": 349588, + "gs_citation": 6, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3905418191286012983&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Department of Computing Science, University of Alberta; Department of Computing Science, University of Alberta; Department of Computing Science, University of Alberta", + "aff_domain": "ualberta.ca;ualberta.ca;ualberta.ca", + "email": "ualberta.ca;ualberta.ca;ualberta.ca", + "github": "https://github.com/U-Alberta/WordTies", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Alberta", + "aff_unique_dep": "Department of Computing Science", 
+ "aff_unique_url": "https://www.ualberta.ca", + "aff_unique_abbr": "UAlberta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Canada" + }, + { + "id": "2022.emnlp-main.478", + "title": "X-FACTOR: A Cross-metric Evaluation of Factual Correctness in Abstractive Summarization", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Abstractive summarization models often produce factually inconsistent summaries that are not supported by the original article. Recently, a number of fact-consistent evaluation techniques have been proposed to address this issue; however, a detailed analysis of how these metrics agree with one another has yet to be conducted. In this paper, we present X-FACTOR, a cross-evaluation of three high-performing fact-aware abstractive summarization methods. First, we show that summarization models are often fine-tuned on datasets that contain factually inconsistent summaries and propose a fact-aware filtering mechanism that improves the quality of training data and, consequently, the factuality of these models. Second, we propose a corrector module that can be used to improve the factual consistency of generated summaries. Third, we present a re-ranking technique that samples summary instances from the output distribution of a summarization model and re-ranks the sampled instances based on their factuality. Finally, we provide a detailed cross-metric agreement analysis that shows how tuning a model to output summaries based on a particular factuality metric influences factuality as determined by the other metrics. 
Our goal in this work is to facilitate research that improves the factuality and faithfulness of abstractive summarization models.", + "author": "Subhajit Chaudhury; Sarathkrishna Swaminathan; Chulaka Gunasekara; Maxwell Crouse; Srinivas Ravishankar; Daiki Kimura; Keerthiram Murugesan; Ram\u00f3n Fernandez Astudillo; Tahira Naseem; Pavan Kapanipathi; Alexander Gray", + "authorids": "/s/subhajit-chaudhury/; /s/sarathkrishna-swaminathan/; /c/chulaka-gunasekara/; /m/maxwell-crouse/; /s/srinivas-ravishankar/; /d/daiki-kimura/; /k/keerthiram-murugesan/; /r/ramon-fernandez-astudillo/; /t/tahira-naseem/; /p/pavan-kapanipathi/; /a/alexander-gray/", + "bibtex": "@inproceedings{chaudhury-etal-2022-x,\n title = \"{X}-{FACTOR}: A Cross-metric Evaluation of Factual Correctness in Abstractive Summarization\",\n author = \"Chaudhury, Subhajit and\n Swaminathan, Sarathkrishna and\n Gunasekara, Chulaka and\n Crouse, Maxwell and\n Ravishankar, Srinivas and\n Kimura, Daiki and\n Murugesan, Keerthiram and\n Fernandez Astudillo, Ram{\\'o}n and\n Naseem, Tahira and\n Kapanipathi, Pavan and\n Gray, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.478/\",\n doi = \"10.18653/v1/2022.emnlp-main.478\",\n pages = \"7100--7110\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.478.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.478/", + "pdf_size": 266502, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7464813805179137744&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 2, + "aff": "IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM 
Research; IBM Research; IBM Research", + "aff_domain": "ibm.com;ibm.com;ibm.com;ibm.com;ibm.com;jp.ibm.com;ibm.com;ibm.com;us.ibm.com;us.ibm.com;ibm.com", + "email": "ibm.com;ibm.com;ibm.com;ibm.com;ibm.com;jp.ibm.com;ibm.com;ibm.com;us.ibm.com;us.ibm.com;ibm.com", + "github": "", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.findings-emnlp.71", + "title": "XDoc: Unified Pre-training for Cross-Format Document Understanding", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The surge of pre-training has witnessed the rapid development of document understanding recently. Pre-training and fine-tuning framework has been effectively used to tackle texts in various formats, including plain texts, document texts, and web texts. Despite achieving promising performance, existing pre-trained models usually target one specific document format at one time, making it difficult to combine knowledge from multiple document formats. To address this, we propose XDoc, a unified pre-trained model which deals with different document formats in a single model. For parameter efficiency, we share backbone parameters for different formats such as the word embedding layer and the Transformer layers. Meanwhile, we introduce adaptive layers with lightweight parameters to enhance the distinction across different formats. Experimental results have demonstrated that with only 36.7% parameters, XDoc achieves comparable or even better performance on a variety of downstream tasks compared with the individual pre-trained models, which is cost effective for real-world deployment. 
The code and pre-trained models are publicly available at https://aka.ms/xdoc.", + "author": "Jingye Chen; Tengchao Lv; Lei Cui; Cha Zhang; Furu Wei", + "authorids": "/j/jingye-chen/; /t/tengchao-lv/; /l/lei-cui/; /c/cha-zhang/; /f/furu-wei/", + "bibtex": "@inproceedings{chen-etal-2022-xdoc,\n title = \"{XD}oc: Unified Pre-training for Cross-Format Document Understanding\",\n author = \"Chen, Jingye and\n Lv, Tengchao and\n Cui, Lei and\n Zhang, Cha and\n Wei, Furu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.71/\",\n doi = \"10.18653/v1/2022.findings-emnlp.71\",\n pages = \"1006--1016\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.71.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.71/", + "pdf_size": 906789, + "gs_citation": 13, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12951282881383427393&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation; Microsoft Corporation", + "aff_domain": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "email": "microsoft.com;microsoft.com;microsoft.com;microsoft.com;microsoft.com", + "github": "", + "project": "https://aka.ms/xdoc", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Microsoft Corporation", + "aff_unique_dep": "", + "aff_unique_url": "https://www.microsoft.com", + "aff_unique_abbr": "Microsoft", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.466", + "title": "XLM-D: Decorate 
Cross-lingual Pre-training Model as Non-Autoregressive Neural Machine Translation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Pre-training language models have achieved thriving success in numerous natural language understanding and autoregressive generation tasks, but non-autoregressive generation in applications such as machine translation has not sufficiently benefited from the pre-training paradigm. In this work, we establish the connection between a pre-trained masked language model (MLM) and non-autoregressive generation on machine translation. From this perspective, we present XLM-D, which seamlessly transforms an off-the-shelf cross-lingual pre-training model into a non-autoregressive translation (NAT) model with a lightweight yet effective decorator. Specifically, the decorator ensures the representation consistency of the pre-trained model and brings only one additional trainable parameter. Extensive experiments on typical translation datasets show that our models obtain state-of-the-art performance while realizing the inference speed-up by 19.9x. 
One striking result is that on WMT14 En-De, our XLM-D obtains 29.80 BLEU points with multiple iterations, which outperforms the previous mask-predict model by 2.77 points.", + "author": "Yong Wang; Shilin He; Guanhua Chen; Yun Chen; Daxin Jiang", + "authorids": "/y/yong-wang/; /s/shilin-he/; /g/guanhua-chen/; /y/yun-chen/; /d/daxin-jiang/", + "bibtex": "@inproceedings{wang-etal-2022-xlm,\n title = \"{XLM}-{D}: Decorate Cross-lingual Pre-training Model as Non-Autoregressive Neural Machine Translation\",\n author = \"Wang, Yong and\n He, Shilin and\n Chen, Guanhua and\n Chen, Yun and\n Jiang, Daxin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.466/\",\n doi = \"10.18653/v1/2022.emnlp-main.466\",\n pages = \"6934--6946\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.466.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.466/", + "pdf_size": 1327213, + "gs_citation": 7, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13659557305821467494&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 0, + "aff": "Tencent Corporation; Microsoft Corporation; Southern University of Science and Technology; Shanghai University of Finance and Economics; Microsoft Corporation", + "aff_domain": "gmail.com;microsoft.com;gmail.com;sufe.edu.cn;microsoft.com", + "email": "gmail.com;microsoft.com;gmail.com;sufe.edu.cn;microsoft.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;2;3;1", + "aff_unique_norm": "Tencent;Microsoft Corporation;Southern University of Science and Technology;Shanghai University of Finance and Economics", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://www.tencent.com;https://www.microsoft.com;https://www.sustech.edu.cn;http://www.sufe.edu.cn", + "aff_unique_abbr": "Tencent;Microsoft;SUSTech;SUFE", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0;1", + "aff_country_unique": "China;United States" + }, + { + "id": "2022.emnlp-main.758", + "title": "XPrompt: Exploring the Extreme of Prompt Tuning", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt tuning learns soft prompts to condition the frozen Pre-trained Language Models (PLMs) for performing downstream tasks in a parameter-efficient manner. While prompt tuning has gradually reached the performance level of fine-tuning as the model scale increases, there is still a large performance gap between prompt tuning and fine-tuning for models of moderate and small scales (typically less than 11B parameters). In this paper, we empirically show that the trained prompt tokens can have a negative impact on a downstream task and thus degrade its performance. To bridge the gap, we propose a novel Prompt tuning model with an eXtremely small scale (XPrompt) under the regime of lottery tickets hypothesis. Specifically, XPrompt eliminates the negative prompt tokens at different granularity levels through a hierarchical structured pruning, yielding a more parameter-efficient prompt yet with a competitive performance. 
Comprehensive experiments are carried out on the SuperGLUE tasks, and the results indicate that XPrompt is able to close the performance gap at smaller model scales.", + "author": "Fang Ma; Chen Zhang; Lei Ren; Jingang Wang; Qifan Wang; Wei Wu; Xiaojun Quan; Dawei Song", + "authorids": "/f/fang-ma/; /c/chen-zhang/; /l/lei-ren/; /j/jingang-wang/; /q/qifan-wang/; /w/wei-wu/; /x/xiaojun-quan/; /d/dawei-song/", + "bibtex": "@inproceedings{ma-etal-2022-xprompt,\n title = \"{XP}rompt: Exploring the Extreme of Prompt Tuning\",\n author = \"Ma, Fang and\n Zhang, Chen and\n Ren, Lei and\n Wang, Jingang and\n Wang, Qifan and\n Wu, Wei and\n Quan, Xiaojun and\n Song, Dawei\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.758/\",\n doi = \"10.18653/v1/2022.emnlp-main.758\",\n pages = \"11033--11047\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.758.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.758/", + "pdf_size": 1300045, + "gs_citation": 39, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1904156666692300573&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 7, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "https://github.com/BD-MF/XPrompt", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.384", + "title": "XRICL: Cross-lingual Retrieval-Augmented In-Context Learning for Cross-lingual Text-to-SQL Semantic Parsing", + "track": "main", + "status": "finding", + "award": false, + "abstract": "In-context learning using large language models has recently shown surprising results for semantic parsing tasks such as Text-to-SQL translation.Prompting GPT-3 or Codex 
using several examples of question-SQL pairs can produce excellent results, comparable to state-of-the-art finetuning-based models.However, existing work primarily focuses on English datasets, and it is unknown whether large language models can serve as competitive semantic parsers for other languages.To bridge this gap, our work focuses on cross-lingual Text-to-SQL semantic parsing for translating non-English utterances into SQL queries based on an English schema.We consider a zero-shot transfer learning setting with the assumption that we do not have any labeled examples in the target language (but have annotated examples in English).This work introduces the XRICL framework, which learns to retrieve relevant English exemplars for a given query to construct prompts.We also include global translation exemplars for a target language to facilitate the translation process for large language models.To systematically evaluate our model, we construct two new benchmark datasets, XSpider and XKaggle-dbqa, which include questions in Chinese, Vietnamese, Farsi, and Hindi.Our experiments show that XRICL effectively leverages large pre-trained language models to outperform existing baselines.Data and code are publicly available at https://github.com/Impavidity/XRICL.", + "author": "Peng Shi; Rui Zhang; He Bai; Jimmy Lin", + "authorids": "/p/peng-shi/; /r/rui-zhang/; /h/he-bai/; /j/jimmy-lin/", + "bibtex": "@inproceedings{shi-etal-2022-xricl,\n title = \"{XRICL}: Cross-lingual Retrieval-Augmented In-Context Learning for Cross-lingual Text-to-{SQL} Semantic Parsing\",\n author = \"Shi, Peng and\n Zhang, Rui and\n Bai, He and\n Lin, Jimmy\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = 
\"https://aclanthology.org/2022.findings-emnlp.384/\",\n doi = \"10.18653/v1/2022.findings-emnlp.384\",\n pages = \"5248--5259\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.384.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.384/", + "pdf_size": 366655, + "gs_citation": 38, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1669859422679036077&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "University of Waterloo; Penn State University; University of Waterloo; University of Waterloo", + "aff_domain": "uwaterloo.ca;psu.edu;uwaterloo.ca;uwaterloo.ca", + "email": "uwaterloo.ca;psu.edu;uwaterloo.ca;uwaterloo.ca", + "github": "https://github.com/Impavidity/XRICL", + "project": "", + "author_num": 4, + "aff_unique_index": "0;1;0;0", + "aff_unique_norm": "University of Waterloo;Penn State University", + "aff_unique_dep": ";", + "aff_unique_url": "https://uwaterloo.ca;https://www.psu.edu", + "aff_unique_abbr": "UW;PSU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;0;0", + "aff_country_unique": "Canada;United States" + }, + { + "id": "2022.findings-emnlp.23", + "title": "Yes-Yes-Yes: Proactive Data Collection for ACL Rolling Review and Beyond", + "track": "main", + "status": "finding", + "award": false, + "abstract": "The shift towards publicly available text sources has enabled language processing at unprecedented scale, yet leaves under-serviced the domains where public and openly licensed data is scarce. Proactively collecting text data for research is a viable strategy to address this scarcity, but lacks systematic methodology taking into account the many ethical, legal and confidentiality-related aspects of data collection. Our work presents a case study on proactive data collection in peer review \u2013 a challenging and under-resourced NLP domain. 
We outline ethical and legal desiderata for proactive data collection and introduce \u201cYes-Yes-Yes\u201d, the first donation-based peer reviewing data collection workflow that meets these requirements. We report on the implementation of Yes-Yes-Yes at ACL Rolling Review and empirically study the implications of proactive data collection for the dataset size and the biases induced by the donation behavior on the peer reviewing platform.", + "author": "Nils Dycke; Ilia Kuznetsov; Iryna Gurevych", + "authorids": "/n/nils-dycke/; /i/ilia-kuznetsov/; /i/iryna-gurevych/", + "bibtex": "@inproceedings{dycke-etal-2022-yes,\n title = \"Yes-Yes-Yes: Proactive Data Collection for {ACL} Rolling Review and Beyond\",\n author = \"Dycke, Nils and\n Kuznetsov, Ilia and\n Gurevych, Iryna\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.23/\",\n doi = \"10.18653/v1/2022.findings-emnlp.23\",\n pages = \"300--318\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.23.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.23/", + "pdf_size": 324762, + "gs_citation": 12, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=12533499774771100336&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University of Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical University of Darmstadt; Ubiquitous Knowledge Processing Lab (UKP Lab) + Department of Computer Science and Hessian Center for AI (hessian.AI) + Technical 
University of Darmstadt", + "aff_domain": ";;", + "email": ";;", + "github": "https://github.com/UKPLab/openreview-licensing-workflow", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1+0;0+1+0;0+1+0", + "aff_unique_norm": "Technical University of Darmstadt;Hessian Center for Artificial Intelligence (hessian.AI)", + "aff_unique_dep": "Ubiquitous Knowledge Processing Lab;", + "aff_unique_url": "https://www.tu-darmstadt.de;https://hessian.ai", + "aff_unique_abbr": "TU Darmstadt;hessian.AI", + "aff_campus_unique_index": ";;", + "aff_campus_unique": "", + "aff_country_unique_index": "0+0+0;0+0+0;0+0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.findings-emnlp.336", + "title": "You Are My Type! Type Embeddings for Pre-trained Language Models", + "track": "main", + "status": "finding", + "award": false, + "abstract": "One reason for the positive impact of Pre-trained Language Models (PLMs) in NLP tasks is their ability to encode semantic types, such as \u2018European City\u2019 or \u2018Woman\u2019. While previous work has analyzed such information in the context of interpretability, it is not clear how to use types to steer the PLM output. For example, in a cloze statement, it is desirable to steer the model to generate a token that satisfies a user-specified type, e.g., predict a date rather than a location. In this work, we introduce Type Embeddings (TEs), an input embedding that promotes desired types in a PLM. Our proposal is to define a type by a small set of word examples. We empirically study the ability of TEs both in representing types and in steering masking predictions without changes to the prompt text in BERT. 
Finally, using the LAMA datasets, we show how TEs highly improve the precision in extracting facts from PLMs.", + "author": "Mohammed Saeed; Paolo Papotti", + "authorids": "/m/mohammed-saeed/; /p/paolo-papotti/", + "bibtex": "@inproceedings{saeed-papotti-2022-type,\n title = \"You Are My Type! Type Embeddings for Pre-trained Language Models\",\n author = \"Saeed, Mohammed and\n Papotti, Paolo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.336/\",\n doi = \"10.18653/v1/2022.findings-emnlp.336\",\n pages = \"4583--4598\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.336.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.336/", + "pdf_size": 1418861, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5321182545965076223&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "EURECOM; EURECOM", + "aff_domain": "eurecom.fr;eurecom.fr", + "email": "eurecom.fr;eurecom.fr", + "github": "", + "project": "", + "author_num": 2, + "aff_unique_index": "0;0", + "aff_unique_norm": "EURECOM", + "aff_unique_dep": "", + "aff_unique_url": "https://www.eurecom.fr", + "aff_unique_abbr": "EURECOM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0", + "aff_country_unique": "France" + }, + { + "id": "2022.findings-emnlp.294", + "title": "You Are What You Talk About: Inducing Evaluative Topics for Personality Analysis", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Expressing attitude or stance toward entities and concepts is an integral part of human behavior and personality. 
Recently, evaluative language data has become more accessible with social media\u2019s rapid growth, enabling large-scale opinion analysis. However, surprisingly little research examines the relationship between personality and evaluative language. To bridge this gap, we introduce the notion of evaluative topics, obtained by applying topic models to pre-filtered evaluative text from social media. We then link evaluative topics to individual text authors to build their evaluative profiles. We apply evaluative profiling to Reddit comments labeled with personality scores and conduct an exploratory study on the relationship between evaluative topics and Big Five personality facets, aiming for a more interpretable, facet-level analysis. Finally, we validate our approach by observing correlations consistent with prior research in personality psychology.", + "author": "Josip Juki\u0107; Iva Vukojevi\u0107; Jan Snajder", + "authorids": "/j/josip-jukic/; /i/iva-vukojevic/; /j/jan-snajder/", + "bibtex": "@inproceedings{jukic-etal-2022-talk,\n title = \"You Are What You Talk About: Inducing Evaluative Topics for Personality Analysis\",\n author = \"Juki{\\'c}, Josip and\n Vukojevi{\\'c}, Iva and\n Snajder, Jan\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.294/\",\n doi = \"10.18653/v1/2022.findings-emnlp.294\",\n pages = \"3986--3999\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.294.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.294/", + "pdf_size": 423424, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=1119773454159835690&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 9, + "aff": 
"University of Zagreb, Faculty of Electrical Engineering and Computing; University of Zagreb, Faculty of Electrical Engineering and Computing; University of Zagreb, Faculty of Electrical Engineering and Computing", + "aff_domain": "fer.hr;fer.hr;fer.hr", + "email": "fer.hr;fer.hr;fer.hr", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Zagreb", + "aff_unique_dep": "Faculty of Electrical Engineering and Computing", + "aff_unique_url": "https://www.fer.unizg.hr", + "aff_unique_abbr": "UNIZG FEEC", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "Croatia" + }, + { + "id": "2022.emnlp-main.198", + "title": "You Only Need One Model for Open-domain Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent approaches to Open-domain Question Answering refer to an external knowledge base using a retriever model, optionally rerank passages with a separate reranker model and generate an answer using another reader model. Despite performing related tasks, the models have separate parameters and are weakly-coupled during training. We propose casting the retriever and the reranker as internal passage-wise attention mechanisms applied sequentially within the transformer architecture and feeding computed representations to the reader, with the hidden representations progressively refined at each stage. This allows us to use a single question answering model trained end-to-end, which is a more efficient use of model capacity and also leads to better gradient flow. We present a pre-training method to effectively train this architecture and evaluate our model on the Natural Questions and TriviaQA open datasets. 
For a fixed parameter budget, our model outperforms the previous state-of-the-art model by 1.0 and 0.7 exact match scores.", + "author": "Haejun Lee; Akhil Kedia; Jongwon Lee; Ashwin Paranjape; Christopher Manning; Kyoung-Gu Woo", + "authorids": "/h/haejun-lee/; /a/akhil-kedia/; /j/jongwon-lee/; /a/ashwin-paranjape/; /c/christopher-d-manning/; /k/kyoung-gu-woo/", + "bibtex": "@inproceedings{lee-etal-2022-need,\n title = \"You Only Need One Model for Open-domain Question Answering\",\n author = \"Lee, Haejun and\n Kedia, Akhil and\n Lee, Jongwon and\n Paranjape, Ashwin and\n Manning, Christopher and\n Woo, Kyoung-Gu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.198/\",\n doi = \"10.18653/v1/2022.emnlp-main.198\",\n pages = \"3047--3060\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.198.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.198/", + "pdf_size": 352150, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2323936279397607785&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 3, + "aff": "Samsung Research; Samsung Research; Samsung Electronics; Stanford University; Stanford University; Growdle Corporation", + "aff_domain": "samsung.com;samsung.com;samsung.com;cs.stanford.edu;cs.stanford.edu;growdle.com", + "email": "samsung.com;samsung.com;samsung.com;cs.stanford.edu;cs.stanford.edu;growdle.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;1;2;2;3", + "aff_unique_norm": "Samsung Research;Samsung Electronics;Stanford University;Growdle Corporation", + "aff_unique_dep": ";;;", + "aff_unique_url": 
"https://research.samsung.com;https://www.samsung.com;https://www.stanford.edu;", + "aff_unique_abbr": "Samsung;Samsung;Stanford;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Stanford", + "aff_country_unique_index": "0;0;0;1;1", + "aff_country_unique": "South Korea;United States;" + }, + { + "id": "2022.findings-emnlp.75", + "title": "You Truly Understand What I Need : Intellectual and Friendly Dialog Agents grounding Persona and Knowledge", + "track": "main", + "status": "finding", + "award": false, + "abstract": "To build a conversational agent that interacts fluently with humans, previous studies blend knowledge or personal profile into the pre-trained language model. However, the model that considers knowledge and persona at the same time is still limited, leading to hallucination and a passive way of using personas. We propose an effective dialogue agent that grounds external knowledge and persona simultaneously. The agent selects the proper knowledge and persona to use for generating the answers with our candidate scoring implemented with a poly-encoder. Then, our model generates the utterance with lesser hallucination and more engagingness utilizing retrieval augmented generation with knowledge-persona enhanced query. We conduct experiments on the persona-knowledge chat and achieve state-of-the-art performance in grounding and generation tasks on the automatic metrics. Moreover, we validate the answers from the models regarding hallucination and engagingness through human evaluation and qualitative results. We show our retriever\u2019s effectiveness in extracting relevant documents compared to the other previous retrievers, along with the comparison of multiple candidate scoring methods. 
Code is available at https://github.com/dlawjddn803/INFO", + "author": "Jungwoo Lim; Myugnhoon Kang; Yuna Hur; Seung Won Jeong; Jinsung Kim; Yoonna Jang; Dongyub Lee; Hyesung Ji; DongHoon Shin; Seungryong Kim; Heuiseok Lim", + "authorids": "/j/jungwoo-lim/; /m/myugnhoon-kang/; /y/yuna-hur/; /s/seung-won-jeong/; /j/jinsung-kim/; /y/yoonna-jang/; /d/dongyub-lee/; /h/hyesung-ji/; /d/donghoon-shin/; /s/seungryong-kim/; /h/heui-seok-lim/", + "bibtex": "@inproceedings{lim-etal-2022-truly,\n title = \"You Truly Understand What {I} Need : Intellectual and Friendly Dialog Agents grounding Persona and Knowledge\",\n author = \"Lim, Jungwoo and\n Kang, Myugnhoon and\n Hur, Yuna and\n Jeong, Seung Won and\n Kim, Jinsung and\n Jang, Yoonna and\n Lee, Dongyub and\n Ji, Hyesung and\n Shin, DongHoon and\n Kim, Seungryong and\n Lim, Heuiseok\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.75/\",\n doi = \"10.18653/v1/2022.findings-emnlp.75\",\n pages = \"1053--1066\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.75.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.75/", + "pdf_size": 2517469, + "gs_citation": 10, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=4832452025842510977&as_sdt=5,44&sciodt=0,44&hl=en", + "gs_version_total": 3, + "aff": "Korea University; Korea University; Korea University; Korea University; Korea University; Korea University; Naver Corporation; Dialogue Tech Division, NCSOFT; Dialogue Tech Division, NCSOFT; Korea University; Korea University", + "aff_domain": "korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;ncsoft.com;ncsoft.com;korea.ac.kr;korea.ac.kr", + "email": 
"korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;korea.ac.kr;navercorp.com;ncsoft.com;ncsoft.com;korea.ac.kr;korea.ac.kr", + "github": "https://github.com/dlawjddn803/INFO", + "project": "", + "author_num": 11, + "aff_unique_index": "0;0;0;0;0;0;1;2;2;0;0", + "aff_unique_norm": "Korea University;Naver Corporation;NCSOFT", + "aff_unique_dep": ";;Dialogue Tech Division", + "aff_unique_url": "https://www.korea.ac.kr;https://www.naver.com;https://www.ncsoft.com", + "aff_unique_abbr": "KU;Naver;NCSOFT", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "South Korea" + }, + { + "id": "2022.findings-emnlp.218", + "title": "You can\u2019t pick your neighbors, or can you? When and How to Rely on Retrieval in the kNN-LM", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Retrieval-enhanced language models (LMs), which condition their predictions on text retrieved from large external datastores, have recently shown significant perplexity improvements compared to standard LMs. One such approach, the kNN-LM, interpolates any existing LM\u2019s predictions with the output of a k-nearest neighbors model and requires no additional training. In this paper, we explore the importance of lexical and semantic matching in the context of items retrieved by kNN-LM. We find two trends: (1) the presence of large overlapping n-grams between the datastore and evaluation set plays an important factor in strong performance, even when the datastore is derived from the training data; and (2) the kNN-LM is most beneficial when retrieved items have high semantic similarity with the query. Based on our analysis, we define a new formulation of the kNN-LM that uses retrieval quality to assign the interpolation coefficient. We empirically measure the effectiveness of our approach on two English language modeling datasets, Wikitext-103 and PG-19. 
Our re-formulation of the kNN-LM is beneficial in both cases, and leads to nearly 4% improvement in perplexity on the Wikitext-103 test set.", + "author": "Andrew Drozdov; Shufan Wang; Razieh Rahimi; Andrew McCallum; Hamed Zamani; Mohit Iyyer", + "authorids": "/a/andrew-drozdov/; /s/shufan-wang/; /r/razieh-rahimi/; /a/andrew-mccallum/; /h/hamed-zamani/; /m/mohit-iyyer/", + "bibtex": "@inproceedings{drozdov-etal-2022-cant,\n title = \"You can`t pick your neighbors, or can you? When and How to Rely on Retrieval in the k{NN}-{LM}\",\n author = \"Drozdov, Andrew and\n Wang, Shufan and\n Rahimi, Razieh and\n McCallum, Andrew and\n Zamani, Hamed and\n Iyyer, Mohit\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.218/\",\n doi = \"10.18653/v1/2022.findings-emnlp.218\",\n pages = \"2997--3007\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.218.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.218/", + "pdf_size": 405845, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9914199678417094203&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Manning College of Information and Computer Sciences, University of Massachusetts Amherst; Manning College of Information and Computer Sciences, University of Massachusetts Amherst; Manning College of Information and Computer Sciences, University of Massachusetts Amherst; Manning College of Information and Computer Sciences, University of Massachusetts Amherst; Manning College of Information and Computer Sciences, University of Massachusetts Amherst; Manning College of Information and Computer Sciences, University of Massachusetts Amherst", + 
"aff_domain": "cs.umass.edu; ; ; ; ; ", + "email": "cs.umass.edu; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "University of Massachusetts Amherst", + "aff_unique_dep": "Manning College of Information and Computer Sciences", + "aff_unique_url": "https://www.umass.edu", + "aff_unique_abbr": "UMass Amherst", + "aff_campus_unique_index": "0;0;0;0;0;0", + "aff_campus_unique": "Amherst", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.78", + "title": "Z-LaVI: Zero-Shot Language Solver Fueled by Visual Imagination", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large-scale pretrained language models have made significant advances in solving downstream language understanding tasks. However, they generally suffer from reporting bias, the phenomenon describing the lack of explicit commonsense knowledge in written text, e.g., \u201dan orange is orange\u201d. To overcome this limitation, we develop a novel approach, Z-LaVI, to endow language models with visual imagination capabilities. Specifically, we leverage two complementary types of \u201dimaginations\u201d: (i) recalling existing images through retrieval and (ii) synthesizing nonexistent images via text-to-image generation. Jointly exploiting the language inputs and the imagination, a pretrained vision-language model (e.g., CLIP) eventually composes a zero-shot solution to the original language tasks. Notably, fueling language models with imagination can effectively leverage visual knowledge to solve plain language tasks. 
In consequence, Z-LaVI consistently improves the zero-shot performance of existing language models across a diverse set of language tasks.", + "author": "Yue Yang; Wenlin Yao; Hongming Zhang; Xiaoyang Wang; Dong Yu; Jianshu Chen", + "authorids": "/y/yue-yang/; /w/wenlin-yao/; /h/hongming-zhang/; /x/xiaoyang-wang/; /d/dong-yu/; /j/jianshu-chen/", + "bibtex": "@inproceedings{yang-etal-2022-z,\n title = \"{Z}-{L}a{VI}: Zero-Shot Language Solver Fueled by Visual Imagination\",\n author = \"Yang, Yue and\n Yao, Wenlin and\n Zhang, Hongming and\n Wang, Xiaoyang and\n Yu, Dong and\n Chen, Jianshu\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.78/\",\n doi = \"10.18653/v1/2022.emnlp-main.78\",\n pages = \"1186--1203\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.78.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.78/", + "pdf_size": 6657318, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=8221159645113361222&as_sdt=80000005&sciodt=0,23&hl=en", + "gs_version_total": 4, + "aff": "University of Pennsylvania; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab; Tencent AI Lab", + "aff_domain": "seas.upenn.edu;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com", + "email": "seas.upenn.edu;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com;global.tencent.com", + "github": "https://github.com/YueYANG1996/Z-LaVI", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;1;1;1;1", + "aff_unique_norm": "University of Pennsylvania;Tencent", + "aff_unique_dep": ";Tencent AI Lab", + "aff_unique_url": 
"https://www.upenn.edu;https://ai.tencent.com", + "aff_unique_abbr": "UPenn;Tencent AI Lab", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;1;1;1;1;1", + "aff_country_unique": "United States;China" + }, + { + "id": "2022.emnlp-industry.45", + "title": "Zero-Shot Dynamic Quantization for Transformer Inference", + "track": "main", + "status": "Industry", + "award": false, + "abstract": "We introduce a novel run-time method for significantly reducing the accuracy loss associated with quantizing BERT-like models to 8-bit integers. Existing methods for quantizing models either modify the training procedure, or they require an additional calibration step to adjust parameters that also requires a selected held-out dataset. Our method permits taking advantage of quantization without the need for these adjustments. We present results on several NLP tasks demonstrating the usefulness of this technique.", + "author": "Yousef El-kurdi; Jerry Quinn; Avi Sil", + "authorids": "/y/yousef-el-kurdi/; /j/jerry-quinn/; /a/avirup-sil/", + "bibtex": "@inproceedings{el-kurdi-etal-2022-zero,\n title = \"Zero-Shot Dynamic Quantization for Transformer Inference\",\n author = \"El-kurdi, Yousef and\n Quinn, Jerry and\n Sil, Avi\",\n editor = \"Li, Yunyao and\n Lazaridou, Angeliki\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: Industry Track\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, UAE\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-industry.45/\",\n doi = \"10.18653/v1/2022.emnlp-industry.45\",\n pages = \"451--457\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-industry.45.pdf", + "site": "https://aclanthology.org/2022.emnlp-industry.45/", + "pdf_size": 322023, + "gs_citation": 2, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9994928550373134958&as_sdt=2005&sciodt=0,5&hl=en", + 
"gs_version_total": 4, + "aff": "IBM Research AI; IBM Research AI; IBM Research AI", + "aff_domain": "us.ibm.com;us.ibm.com;us.ibm.com", + "email": "us.ibm.com;us.ibm.com;us.ibm.com", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "IBM Research", + "aff_unique_dep": "AI", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.474", + "title": "Zero-Shot Learners for Natural Language Understanding via a Unified Multiple Choice Perspective", + "track": "main", + "status": "Main", + "award": false, + "abstract": "We propose a new paradigm for zero-shot learners that is format agnostic, i.e., it is compatible with any format and applicable to a list of language tasks, such as text classification, commonsense reasoning, coreference resolution, and sentiment analysis. Zero-shot learning aims to train a model on a given task such that it can address new learning tasks without any additional training. Our approach converts zero-shot learning into multiple-choice tasks, avoiding problems in commonly used large-scale generative models such as FLAN. It not only adds generalization ability to models but also significantly reduces the number of parameters. Our method shares the merits of efficient training and deployment. Our approach shows state-of-the-art performance on several benchmarks and produces satisfactory results on tasks such as natural language inference and text classification. Our model achieves this success with only 235M parameters, which is substantially smaller than state-of-the-art models with billions of parameters. 
The code and pre-trained models are available at https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen/examples/unimc .", + "author": "Ping Yang; Junjie Wang; Ruyi Gan; Xinyu Zhu; Lin Zhang; Ziwei Wu; Xinyu Gao; Jiaxing Zhang; Tetsuya Sakai", + "authorids": "/p/ping-yang/; /j/junjie-wang/; /r/ruyi-gan/; /x/xinyu-zhu/; /l/lin-zhang/; /z/ziwei-wu/; /x/xinyu-gao/; /j/jiaxing-zhang/; /t/tetsuya-sakai/", + "bibtex": "@inproceedings{yang-etal-2022-zero,\n title = \"Zero-Shot Learners for Natural Language Understanding via a Unified Multiple Choice Perspective\",\n author = \"Yang, Ping and\n Wang, Junjie and\n Gan, Ruyi and\n Zhu, Xinyu and\n Zhang, Lin and\n Wu, Ziwei and\n Gao, Xinyu and\n Zhang, Jiaxing and\n Sakai, Tetsuya\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.474/\",\n doi = \"10.18653/v1/2022.emnlp-main.474\",\n pages = \"7042--7055\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.474.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.474/", + "pdf_size": 745847, + "gs_citation": 24, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=7302970962333992441&as_sdt=40005&sciodt=0,10&hl=en", + "gs_version_total": 10, + "aff": "International Digital Economy Academy; Waseda University; International Digital Economy Academy; Tsinghua University; International Digital Economy Academy; International Digital Economy Academy; International Digital Economy Academy; International Digital Economy Academy; Waseda University", + "aff_domain": "idea.edu.cn;toki.waseda.jp;idea.edu.cn;mails.tsinghua.edu.cn;idea.edu.cn;idea.edu.cn;idea.edu.cn;idea.edu.cn;acm.org", + "email": 
"idea.edu.cn;toki.waseda.jp;idea.edu.cn;mails.tsinghua.edu.cn;idea.edu.cn;idea.edu.cn;idea.edu.cn;idea.edu.cn;acm.org", + "github": "https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen/examples/unimc", + "project": "", + "author_num": 9, + "aff_unique_index": "0;1;0;2;0;0;0;0;1", + "aff_unique_norm": "International Digital Economy Academy;Waseda University;Tsinghua University", + "aff_unique_dep": ";;", + "aff_unique_url": ";https://www.waseda.jp/top;https://www.tsinghua.edu.cn", + "aff_unique_abbr": ";Waseda;THU", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "1;2;1", + "aff_country_unique": ";Japan;China" + }, + { + "id": "2022.emnlp-main.73", + "title": "Zero-Shot Text Classification with Self-Training", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Recent advances in large pretrained language models have increased attention to zero-shot text classification. In particular, models finetuned on natural language inference datasets have been widely adopted as zero-shot classifiers due to their promising results and off-the-shelf availability. However, the fact that such models are unfamiliar with the target task can lead to instability and performance issues. We propose a plug-and-play method to bridge this gap using a simple self-training approach, requiring only the class names along with an unlabeled dataset, and without the need for domain expertise or trial and error. 
We show that fine-tuning the zero-shot classifier on its most confident predictions leads to significant performance gains across a wide range of text classification tasks, presumably since self-training adapts the zero-shot model to the task at hand.", + "author": "Ariel Gera; Alon Halfon; Eyal Shnarch; Yotam Perlitz; Liat Ein-Dor; Noam Slonim", + "authorids": "/a/ariel-gera/; /a/alon-halfon/; /e/eyal-shnarch/; /y/yotam-perlitz/; /l/liat-ein-dor/; /n/noam-slonim/", + "bibtex": "@inproceedings{gera-etal-2022-zero,\n title = \"Zero-Shot Text Classification with Self-Training\",\n author = \"Gera, Ariel and\n Halfon, Alon and\n Shnarch, Eyal and\n Perlitz, Yotam and\n Ein-Dor, Liat and\n Slonim, Noam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.73/\",\n doi = \"10.18653/v1/2022.emnlp-main.73\",\n pages = \"1107--1119\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.73.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.73/", + "pdf_size": 400190, + "gs_citation": 66, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=5679890858405288289&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "IBM Research; IBM Research; IBM Research; IBM Research; IBM Research; IBM Research", + "aff_domain": "ibm.com;il.ibm.com;il.ibm.com;ibm.com;il.ibm.com;il.ibm.com", + "email": "ibm.com;il.ibm.com;il.ibm.com;ibm.com;il.ibm.com;il.ibm.com", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;0;0;0;0;0", + "aff_unique_norm": "IBM", + "aff_unique_dep": "IBM Research", + "aff_unique_url": "https://www.ibm.com/research", + "aff_unique_abbr": "IBM", + "aff_campus_unique_index": "", + 
"aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.790", + "title": "Zero-shot Cross-lingual Transfer of Prompt-based Tuning with a Unified Multilingual Prompt", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Prompt-based tuning has been proven effective for pretrained language models (PLMs). While most of the existing work focuses on the monolingual prompts, we study the multilingual prompts for multilingual PLMs, especially in the zero-shot cross-lingual setting. To alleviate the effort of designing different prompts for multiple languages, we propose a novel model that uses a unified prompt for all languages, called UniPrompt. Different from the discrete prompts and soft prompts, the unified prompt is model-based and language-agnostic. Specifically, the unified prompt is initialized by a multilingual PLM to produce language-independent representation, after which is fused with the text input. During inference, the prompts can be pre-computed so that no extra computation cost is needed. To collocate with the unified prompt, we propose a new initialization method for the target label word to further improve the model\u2019s transferability across languages. Extensive experiments show that our proposed methods can significantly outperform the strong baselines across different languages. 
We release data and code to facilitate future research.", + "author": "Lianzhe Huang; Shuming Ma; Dongdong Zhang; Furu Wei; Houfeng Wang", + "authorids": "/l/lianzhe-huang/; /s/shuming-ma/; /d/dongdong-zhang/; /f/furu-wei/; /h/houfeng-wang/", + "bibtex": "@inproceedings{huang-etal-2022-zero,\n title = \"Zero-shot Cross-lingual Transfer of Prompt-based Tuning with a Unified Multilingual Prompt\",\n author = \"Huang, Lianzhe and\n Ma, Shuming and\n Zhang, Dongdong and\n Wei, Furu and\n Wang, Houfeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.790/\",\n doi = \"10.18653/v1/2022.emnlp-main.790\",\n pages = \"11488--11497\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.790.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.790/", + "pdf_size": 1173899, + "gs_citation": 31, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17648572368882876201&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "MOE Key Lab of Computational Linguistics, Peking University\u2020; Microsoft Research Asia\u2021; Microsoft Research Asia\u2021; Microsoft Research Asia\u2021; MOE Key Lab of Computational Linguistics, Peking University\u2020", + "aff_domain": "pku.edu.cn;microsoft.com;microsoft.com;microsoft.com;pku.edu.cn", + "email": "pku.edu.cn;microsoft.com;microsoft.com;microsoft.com;pku.edu.cn", + "github": "https://github.com/mojave-pku/UniPrompt", + "project": "", + "author_num": 5, + "aff_unique_index": "0;1;1;1;0", + "aff_unique_norm": "Peking University;Microsoft Research Asia", + "aff_unique_dep": "MOE Key Lab of Computational Linguistics;Microsoft Research", + "aff_unique_url": 
"http://www.pku.edu.cn;https://www.microsoft.com/en-us/research/group/asia", + "aff_unique_abbr": "PKU;MSRA", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.801", + "title": "ZeroGen: Efficient Zero-shot Learning via Dataset Generation", + "track": "main", + "status": "Main", + "award": false, + "abstract": "There is a growing interest in dataset generation recently due to the superior generative capacity of large pre-trained language models (PLMs). In this paper, we study a flexible and efficient zero-shot learning method, ZeroGen. Given a zero-shot task, we first generate a dataset from scratch using PLMs in an unsupervised manner. Then, we train a tiny task model (e.g., LSTM) under the supervision of the synthesized dataset. This approach allows highly efficient inference as the final task model only has orders of magnitude fewer parameters comparing to PLMs (e.g., GPT2-XL). Apart from being annotation-free and efficient, we argue that ZeroGen can also provide useful insights from the perspective of data-free model-agnostic knowledge distillation, and unreferenced text generation evaluation. 
Experiments and analysis on different NLP tasks, namely, text classification, question answering, and natural language inference, show the effectiveness of ZeroGen.", + "author": "Jiacheng Ye; Jiahui Gao; Qintong Li; Hang Xu; Jiangtao Feng; Zhiyong Wu; Tao Yu; Lingpeng Kong", + "authorids": "/j/jiacheng-ye/; /j/jiahui-gao/; /q/qintong-li/; /h/hang-xu/; /j/jiangtao-feng/; /z/zhiyong-wu/; /t/tao-yu/; /l/lingpeng-kong/", + "bibtex": "@inproceedings{ye-etal-2022-zerogen,\n title = \"{Z}ero{G}en: Efficient Zero-shot Learning via Dataset Generation\",\n author = \"Ye, Jiacheng and\n Gao, Jiahui and\n Li, Qintong and\n Xu, Hang and\n Feng, Jiangtao and\n Wu, Zhiyong and\n Yu, Tao and\n Kong, Lingpeng\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.801/\",\n doi = \"10.18653/v1/2022.emnlp-main.801\",\n pages = \"11653--11669\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.801.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.801/", + "pdf_size": 542875, + "gs_citation": 204, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=17392009296900150762&as_sdt=5,33&sciodt=0,33&hl=en", + "gs_version_total": 4, + "aff": ";;;;;;;", + "aff_domain": ";;;;;;;", + "email": ";;;;;;;", + "github": "", + "project": "", + "author_num": 8 + }, + { + "id": "2022.findings-emnlp.312", + "title": "ZeroPrompt: Scaling Prompt-Based Pretraining to 1,000 Tasks Improves Zero-Shot Generalization", + "track": "main", + "status": "finding", + "award": false, + "abstract": "We propose a multitask pretraining approach ZeroPrompt for zero-shot generalization, focusing on task scaling and zero-shot prompting. While previous models are trained on only a few 
dozen tasks, we scale to 1,000 tasks for the first time using real-world data. This leads to a crucial discovery that task scaling can be an efficient alternative to model scaling; i.e., the model size has less impact on performance with an extremely large number of tasks. Our results show that task scaling can improve training efficiency by 30 times in FLOPs.Empirically, ZeroPrompt substantially improves both the efficiency and the performance of zero-shot learning across a variety of academic and production datasets.", + "author": "Hanwei Xu; Yujun Chen; Yulun Du; Nan Shao; Wang Yanggang; Haiyu Li; Zhilin Yang", + "authorids": "/h/hanwei-xu/; /y/yujun-chen/; /y/yulun-du/; /n/nan-shao/; /w/wang-yanggang/; /h/haiyu-li/; /z/zhilin-yang/", + "bibtex": "@inproceedings{xu-etal-2022-zeroprompt,\n title = \"{Z}ero{P}rompt: Scaling Prompt-Based Pretraining to 1,000 Tasks Improves Zero-Shot Generalization\",\n author = \"Xu, Hanwei and\n Chen, Yujun and\n Du, Yulun and\n Shao, Nan and\n Yanggang, Wang and\n Li, Haiyu and\n Yang, Zhilin\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.312/\",\n doi = \"10.18653/v1/2022.findings-emnlp.312\",\n pages = \"4235--4252\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.312.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.312/", + "pdf_size": 779351, + "gs_citation": 28, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=16129155120399005556&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI; Recurrent AI", + "aff_domain": "rcrai.com;rcrai.com;rcrai.com;rcrai.com; ; ; ", + "email": 
"rcrai.com;rcrai.com;rcrai.com;rcrai.com; ; ; ", + "github": "", + "project": "", + "author_num": 7, + "aff_unique_index": "0;0;0;0;0;0;0", + "aff_unique_norm": "Recurrent AI", + "aff_unique_dep": "", + "aff_unique_url": "https://www.recurrent.ai", + "aff_unique_abbr": "Recurrent AI", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.641", + "title": "arXivEdits: Understanding the Human Revision Process in Scientific Writing", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Scientific publications are the primary means to communicate research discoveries, where the writing quality is of crucial importance. However, prior work studying the human editing process in this domain mainly focused on the abstract or introduction sections, resulting in an incomplete picture. In this work, we provide a complete computational framework for studying text revision in scientific writing. We first introduce arXivEdits, a new annotated corpus of 751 full papers from arXiv with gold sentence alignment across their multiple versions of revision, as well as fine-grained span-level edits and their underlying intentions for 1,000 sentence pairs. It supports our data-driven analysis to unveil the common strategies practiced by researchers for revising their papers. To scale up the analysis, we also develop automatic methods to extract revision at document-, sentence-, and word-levels. A neural CRF sentence alignment model trained on our corpus achieves 93.8 F1, enabling the reliable matching of sentences between different versions. We formulate the edit extraction task as a span alignment problem, and our proposed method extracts more fine-grained and explainable edits, compared to the commonly used diff algorithm. An intention classifier trained on our dataset achieves 78.9 F1 on the fine-grained intent classification task. 
Our data and system are released at tiny.one/arxivedits.", + "author": "Chao Jiang; Wei Xu; Samuel Stevens", + "authorids": "/c/chao-jiang/; /w/wei-xu/; /s/samuel-stevens/", + "bibtex": "@inproceedings{jiang-etal-2022-arxivedits,\n title = \"ar{X}iv{E}dits: Understanding the Human Revision Process in Scientific Writing\",\n author = \"Jiang, Chao and\n Xu, Wei and\n Stevens, Samuel\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.641/\",\n doi = \"10.18653/v1/2022.emnlp-main.641\",\n pages = \"9420--9435\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.641.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.641/", + "pdf_size": 1918678, + "gs_citation": 23, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=3891705705530805321&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 6, + "aff": "School of Interactive Computing, Georgia Institute of Technology; School of Interactive Computing, Georgia Institute of Technology; Department of Computer Science and Engineering, Ohio State University", + "aff_domain": "gatech.edu;cc.gatech.edu;osu.edu", + "email": "gatech.edu;cc.gatech.edu;osu.edu", + "github": "", + "project": "tiny.one/arxivedits", + "author_num": 3, + "aff_unique_index": "0;0;1", + "aff_unique_norm": "Georgia Institute of Technology;Ohio State University", + "aff_unique_dep": "School of Interactive Computing;Department of Computer Science and Engineering", + "aff_unique_url": "https://www.gatech.edu;https://www.osu.edu", + "aff_unique_abbr": "Georgia Tech;OSU", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Atlanta;", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United 
States" + }, + { + "id": "2022.findings-emnlp.315", + "title": "m4 Adapter: Multilingual Multi-Domain Adaptation for Machine Translation with a Meta-Adapter", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Multilingual neural machine translation models (MNMT) yield state-of-the-art performance when evaluated on data from a domain and language pair seen at training time. However, when a MNMT model is used to translate under domain shift or to a new language pair, performance drops dramatically. We consider a very challenging scenario: adapting the MNMT model both to a new domain and to a new language pair at the same time. In this paper, we propose m^4Adapter (Multilingual Multi-Domain Adaptation for Machine Translation with a Meta-Adapter), which combines domain and language knowledge using meta-learning with adapters. We present results showing that our approach is a parameter-efficient solution which effectively adapts a model to both a new language pair and a new domain, while outperforming other adapter methods. 
An ablation study also shows that our approach more effectively transfers domain knowledge across different languages and language information across different domains.", + "author": "Wen Lai; Alexandra Chronopoulou; Alexander Fraser", + "authorids": "/w/wen-lai/; /a/alexandra-chronopoulou/; /a/alexander-fraser/", + "bibtex": "@inproceedings{lai-etal-2022-m4,\n title = \"m$^4$ Adapter: Multilingual Multi-Domain Adaptation for Machine Translation with a Meta-Adapter\",\n author = \"Lai, Wen and\n Chronopoulou, Alexandra and\n Fraser, Alexander\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.315/\",\n doi = \"10.18653/v1/2022.findings-emnlp.315\",\n pages = \"4282--4296\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.315.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.315/", + "pdf_size": 1070594, + "gs_citation": 16, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=9129023999215478036&as_sdt=5,34&sciodt=0,34&hl=en", + "gs_version_total": 10, + "aff": "Center for Information and Language Processing, LMU Munich, Germany+Munich Center for Machine Learning, Germany; Center for Information and Language Processing, LMU Munich, Germany+Munich Center for Machine Learning, Germany; Center for Information and Language Processing, LMU Munich, Germany+Munich Center for Machine Learning, Germany", + "aff_domain": "cis.lmu.de;cis.lmu.de;cis.lmu.de", + "email": "cis.lmu.de;cis.lmu.de;cis.lmu.de", + "github": "https://github.com/lavine-lmu/m4Adapter", + "project": "", + "author_num": 3, + "aff_unique_index": "0+1;0+1;0+1", + "aff_unique_norm": "LMU Munich;Munich Center for Machine Learning", + "aff_unique_dep": "Center for 
Information and Language Processing;", + "aff_unique_url": "https://www.lmu.de;", + "aff_unique_abbr": "LMU;", + "aff_campus_unique_index": "0;0;0", + "aff_campus_unique": "Munich;", + "aff_country_unique_index": "0+0;0+0;0+0", + "aff_country_unique": "Germany" + }, + { + "id": "2022.emnlp-main.488", + "title": "mPLUG: Effective and Efficient Vision-Language Learning by Cross-modal Skip-connections", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Large-scale pre-trained foundation models have been an emerging paradigm for building artificial intelligence (AI) systems, which can be quickly adapted to a wide range of downstream tasks. This paper presents mPLUG, a new vision-language foundation model for both cross-modal understanding and generation. Most existing pre-trained models suffer from inefficiency and linguistic signal overwhelmed by long visual sequences in cross-modal alignment. To address both problems, mPLUG introduces an effective and efficient vision-language architecture with novel cross-modal skip-connections.mPLUG is pre-trained end-to-end on large-scale image-text pairs with both discriminative and generative objectives. It achieves state-of-the-art results on a wide range of vision-language downstream tasks, including image captioning, image-text retrieval, visual grounding and visual question answering. mPLUG also demonstrates strong zero-shot transferability on vision-language and video-language tasks. 
The code and pre-trained models are available at https://github.com/alibaba/AliceMind", + "author": "Chenliang Li; Haiyang Xu; Junfeng Tian; Wei Wang; Ming Yan; Bin Bi; Jiabo Ye; He Chen; Guohai Xu; Zheng Cao; Ji Zhang; Songfang Huang; Fei Huang; Jingren Zhou; Luo Si", + "authorids": "/c/chenliang-li/; /h/haiyang-xu/; /j/junfeng-tian/; /w/wei-wang/; /m/ming-yan/; /b/bin-bi/; /j/jiabo-ye/; /h/he-chen/; /g/guohai-xu/; /z/zheng-cao/; /j/ji-zhang/; /s/songfang-huang/; /f/fei-huang/; /j/jingren-zhou/; /l/luo-si/", + "bibtex": "@inproceedings{li-etal-2022-mplug,\n title = \"m{PLUG}: Effective and Efficient Vision-Language Learning by Cross-modal Skip-connections\",\n author = \"Li, Chenliang and\n Xu, Haiyang and\n Tian, Junfeng and\n Wang, Wei and\n Yan, Ming and\n Bi, Bin and\n Ye, Jiabo and\n Chen, He and\n Xu, Guohai and\n Cao, Zheng and\n Zhang, Ji and\n Huang, Songfang and\n Huang, Fei and\n Zhou, Jingren and\n Si, Luo\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.488/\",\n doi = \"10.18653/v1/2022.emnlp-main.488\",\n pages = \"7241--7259\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.488.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.488/", + "pdf_size": 10234407, + "gs_citation": 145, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13837127334268740357&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; 
DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group; DAMO Academy, Alibaba Group", + "aff_domain": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "email": "alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com;alibaba-inc.com", + "github": "https://github.com/alibaba/AliceMind", + "project": "", + "author_num": 15, + "aff_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_unique_norm": "Alibaba Group", + "aff_unique_dep": "DAMO Academy", + "aff_unique_url": "https://www.alibaba-group.com", + "aff_unique_abbr": "Alibaba", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0;0;0;0;0;0;0;0;0;0;0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.485", + "title": "monoQA: Multi-Task Learning of Reranking and Answer Extraction for Open-Retrieval Conversational Question Answering", + "track": "main", + "status": "Main", + "award": false, + "abstract": "To address the Conversational Question Answering (ORConvQA) task, previous work has considered an effective three-stage architecture, consisting of a retriever, a reranker, and a reader to extract the answers. In order to effectively answer the users\u2019 questions, a number of existing approaches have applied multi-task learning, such that the same model is shared between the reranker and the reader. Such approaches also typically tackle reranking and reading as classification tasks. 
On the other hand, recent text generation models, such as monoT5 and UnifiedQA, have been shown to respectively yield impressive performances in passage reranking and reading. However, no prior work has combined monoT5 and UnifiedQA to share a single text generation model that directly extracts the answers for the users instead of predicting the start/end positions in a retrieved passage. In this paper, we investigate the use of Multi-Task Learning (MTL) to improve performance on the ORConvQA task by sharing the reranker and reader\u2019s learned structure in a generative model. In particular, we propose monoQA, which uses a text generation model with multi-task learning for both the reranker and reader. Our model, which is based on the T5 text generation model, is fine-tuned simultaneously for both reranking (in order to improve the precision of the top retrieved passages) and extracting the answer. Our results on the OR-QuAC and OR-CoQA datasets demonstrate the effectiveness of our proposed model, which significantly outperforms existing strong baselines with improvements ranging from +12.31% to +19.51% in MAP and from +5.70% to +23.34% in F1 on all used test sets.", + "author": "Sarawoot Kongyoung; Craig Macdonald; Iadh Ounis", + "authorids": "/s/sarawoot-kongyoung/; /c/craig-macdonald/; /i/iadh-ounis/", + "bibtex": "@inproceedings{kongyoung-etal-2022-monoqa,\n title = \"mono{QA}: Multi-Task Learning of Reranking and Answer Extraction for Open-Retrieval Conversational Question Answering\",\n author = \"Kongyoung, Sarawoot and\n Macdonald, Craig and\n Ounis, Iadh\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.485/\",\n doi = 
\"10.18653/v1/2022.emnlp-main.485\",\n pages = \"7207--7218\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.485.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.485/", + "pdf_size": 447994, + "gs_citation": 8, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=13243028799135089002&as_sdt=5,24&sciodt=0,24&hl=en", + "gs_version_total": 4, + "aff": "University of Glasgow, UK; University of Glasgow, UK; University of Glasgow, UK", + "aff_domain": "research.gla.ac.uk;glasgow.ac.uk;glasgow.ac.uk", + "email": "research.gla.ac.uk;glasgow.ac.uk;glasgow.ac.uk", + "github": "", + "project": "", + "author_num": 3, + "aff_unique_index": "0;0;0", + "aff_unique_norm": "University of Glasgow", + "aff_unique_dep": "", + "aff_unique_url": "https://www.gla.ac.uk", + "aff_unique_abbr": "Glasgow", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0", + "aff_country_unique": "United Kingdom" + }, + { + "id": "2022.emnlp-main.711", + "title": "\u201cCovid vaccine is against Covid but Oxford vaccine is made at Oxford!\u201d Semantic Interpretation of Proper Noun Compounds", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Proper noun compounds, e.g., \u201cCovid vaccine\u201d, convey information in a succinct manner (a \u201cCovid vaccine\u201d is a \u201cvaccine that immunizes against the Covid disease\u201d). These are commonly used in short-form domains, such as news headlines, but are largely ignored in information-seeking applications. To address this limitation, we release a new manually annotated dataset, ProNCI, consisting of 22.5K proper noun compounds along with their free-form semantic interpretations. ProNCI is 60 times larger than prior noun compound datasets and also includes non-compositional examples, which have not been previously explored. 
We experiment with various neural models for automatically generating the semantic interpretations from proper noun compounds, ranging from few-shot prompting to supervised learning, with varying degrees of knowledge about the constituent nouns. We find that adding targeted knowledge, particularly about the common noun, results in performance gains of upto 2.8%. Finally, we integrate our model generated interpretations with an existing Open IE system and observe an 7.5% increase in yield at a precision of 85%. The dataset and code are available at https://github.com/dair-iitd/pronci.", + "author": "Keshav Kolluru; Gabriel Stanovsky; Mausam -", + "authorids": "/k/keshav-kolluru/; /g/gabriel-stanovsky/; /m/mausam/", + "bibtex": "@inproceedings{kolluru-etal-2022-covid,\n title = \"{\\textquotedblleft}Covid vaccine is against Covid but {O}xford vaccine is made at {O}xford!{\\textquotedblright} Semantic Interpretation of Proper Noun Compounds\",\n author = \"Kolluru, Keshav and\n Stanovsky, Gabriel and\n -, Mausam\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.711/\",\n doi = \"10.18653/v1/2022.emnlp-main.711\",\n pages = \"10407--10420\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.711.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.711/", + "pdf_size": 538571, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6654710629482629152&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 5, + "aff": "Indian Institute of Technology Delhi; The Hebrew University of Jerusalem; Indian Institute of Technology Delhi", + "aff_domain": "gmail.com;mail.huji.ac.il;cse.iitd.ac.in", + "email": 
"gmail.com;mail.huji.ac.il;cse.iitd.ac.in", + "github": "https://github.com/dair-iitd/pronci", + "project": "", + "author_num": 3, + "aff_unique_index": "0;1;0", + "aff_unique_norm": "Indian Institute of Technology Delhi;The Hebrew University of Jerusalem", + "aff_unique_dep": ";", + "aff_unique_url": "https://www.iitd.ac.in;https://www.huji.ac.il", + "aff_unique_abbr": "IIT Delhi;HUJI", + "aff_campus_unique_index": "0;0", + "aff_campus_unique": "Delhi;", + "aff_country_unique_index": "0;1;0", + "aff_country_unique": "India;Israel" + }, + { + "id": "2022.findings-emnlp.212", + "title": "\u201cI Know Who You Are\u201d: Character-Based Features for Conversational Humor Recognition in Chinese", + "track": "main", + "status": "finding", + "award": false, + "abstract": "Humor plays an important role in our daily life, as it is an essential and fascinating element in the communication between persons. Therefore, how to recognize punchlines from the dialogue, i.e. conversational humor recognition, has attracted much interest of computational linguistics communities. However, most existing work attempted to understand the conversational humor by analyzing the contextual information of the dialogue, but neglected the character of the interlocutor, such as age, gender, occupation, and so on. For instance, the same utterance could bring out humorous from a serious person, but may be a plain expression from a naive person. To this end, this paper proposes a Character Fusion Conversational Humor Recognition model (CFCHR) to explore character information to recognize conversational humor. CFCHR utilizes a multi-task learning framework that unifies two highly pertinent tasks, i.e., character extraction and punchline identification. Based on deep neural networks, we trained both tasks jointly by sharing weight to extract the common and task-invariant features while each task could still learn its task-specific features. 
Experiments were conducted on Chinese sitcoms corpus, which consisted of 12,677 utterances from 22 characters. The experimental results demonstrated that CFCHR could achieve 33.08% improvements in terms of F1-score over some strong baselines, and proved the effectiveness of the character information to identify the punchlines.", + "author": "Wenbo Shang; Jiangjiang Zhao; Zezhong Wang; Binyang Li; Fangchun Yang; Kam-Fai Wong", + "authorids": "/w/wenbo-shang/; /j/jiangjiang-zhao/; /z/zezhong-wang/; /b/binyang-li/; /f/fangchun-yang/; /k/kam-fai-wong/", + "bibtex": "@inproceedings{shang-etal-2022-know,\n title = \"{\\textquotedblleft}{I} Know Who You Are{\\textquotedblright}: Character-Based Features for Conversational Humor Recognition in {C}hinese\",\n author = \"Shang, Wenbo and\n Zhao, Jiangjiang and\n Wang, Zezhong and\n Li, Binyang and\n Yang, Fangchun and\n Wong, Kam-Fai\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2022\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.findings-emnlp.212/\",\n doi = \"10.18653/v1/2022.findings-emnlp.212\",\n pages = \"2927--2932\"\n}", + "pdf": "https://aclanthology.org/2022.findings-emnlp.212.pdf", + "site": "https://aclanthology.org/2022.findings-emnlp.212/", + "pdf_size": 444375, + "gs_citation": 1, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=11262991278008533967&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 0, + "aff": "University of International Relations, China; Beijing University of Posts and Telecommunications, China; The Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China; University of International Relations, China; Beijing University of Posts and Telecommunications, China; The 
Chinese University of Hong Kong, Hong Kong, China + MoE Key Laboratory of High Confidence Software Technologies, China", + "aff_domain": "uir.edu.cn;uir.edu.cn;bupt.edu.cn;bupt.edu.cn;se.cuhk.edu.hk;se.cuhk.edu.hk", + "email": "uir.edu.cn;uir.edu.cn;bupt.edu.cn;bupt.edu.cn;se.cuhk.edu.hk;se.cuhk.edu.hk", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2+3;0;1;2+3", + "aff_unique_norm": "University of International Relations;Beijing University of Posts and Telecommunications;The Chinese University of Hong Kong;MoE Key Laboratory of High Confidence Software Technologies", + "aff_unique_dep": ";;;High Confidence Software Technologies", + "aff_unique_url": ";http://www.bupt.edu.cn/;https://www.cuhk.edu.hk;", + "aff_unique_abbr": ";BUPT;CUHK;", + "aff_campus_unique_index": "1;1", + "aff_campus_unique": ";Hong Kong", + "aff_country_unique_index": "0;0;0+0;0;0;0+0", + "aff_country_unique": "China" + }, + { + "id": "2022.emnlp-main.553", + "title": "\u201cIt\u2019s Not Just Hate\u201d: A Multi-Dimensional Perspective on Detecting Harmful Speech Online", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Well-annotated data is a prerequisite for good Natural Language Processing models. Too often, though, annotation decisions are governed by optimizing time or annotator agreement. We make a case for nuanced efforts in an interdisciplinary setting for annotating offensive online speech. Detecting offensive content is rapidly becoming one of the most important real-world NLP tasks. However, most datasets use a single binary label, e.g., for hate or incivility, even though each concept is multi-faceted. 
This modeling choice severely limits nuanced insights, but also performance.We show that a more fine-grained multi-label approach to predicting incivility and hateful or intolerant content addresses both conceptual and performance issues.We release a novel dataset of over 40,000 tweets about immigration from the US and UK, annotated with six labels for different aspects of incivility and intolerance.Our dataset not only allows for a more nuanced understanding of harmful speech online, models trained on it also outperform or match performance on benchmark datasets", + "author": "Federico Bianchi; Stefanie HIlls; Patricia Rossini; Dirk Hovy; Rebekah Tromble; Nava Tintarev", + "authorids": "/f/federico-bianchi/; /s/stefanie-hills/; /p/patricia-rossini/; /d/dirk-hovy/; /r/rebekah-tromble/; /n/nava-tintarev/", + "bibtex": "@inproceedings{bianchi-etal-2022-just,\n title = \"{\\textquotedblleft}It`s Not Just Hate{\\textquotedblright}: A Multi-Dimensional Perspective on Detecting Harmful Speech Online\",\n author = \"Bianchi, Federico and\n HIlls, Stefanie and\n Rossini, Patricia and\n Hovy, Dirk and\n Tromble, Rebekah and\n Tintarev, Nava\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.553/\",\n doi = \"10.18653/v1/2022.emnlp-main.553\",\n pages = \"8093--8099\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.553.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.553/", + "pdf_size": 186126, + "gs_citation": 21, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=14653628033544314223&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 8, + "aff": "Stanford University; University of Stirling; University of Glasgow; Bocconi 
University; George Washington University; Maastricht University", + "aff_domain": "; ; ; ; ; ", + "email": "; ; ; ; ; ", + "github": "", + "project": "", + "author_num": 6, + "aff_unique_index": "0;1;2;3;4;5", + "aff_unique_norm": "Stanford University;University of Stirling;University of Glasgow;Bocconi University;George Washington University;Maastricht University", + "aff_unique_dep": ";;;;;", + "aff_unique_url": "https://www.stanford.edu;https://www.stirling.ac.uk;https://www.gla.ac.uk;https://www.bocconi.edu;https://www.gwu.edu;https://www.maastrichtuniversity.nl", + "aff_unique_abbr": "Stanford;Stirling;Glasgow;Bocconi;GWU;MU", + "aff_campus_unique_index": "0", + "aff_campus_unique": "Stanford;", + "aff_country_unique_index": "0;1;1;2;0;3", + "aff_country_unique": "United States;United Kingdom;Italy;Netherlands" + }, + { + "id": "2022.emnlp-main.625", + "title": "\u201cI\u2019m sorry to hear that\u201d: Finding New Biases in Language Models with a Holistic Descriptor Dataset", + "track": "main", + "status": "Main", + "award": false, + "abstract": "As language models grow in popularity, it becomes increasingly important to clearly measure all possible markers of demographic identity in order to avoid perpetuating existing societal harms. Many datasets for measuring bias currently exist, but they are restricted in their coverage of demographic axes and are commonly used with preset bias tests that presuppose which types of biases models can exhibit. In this work, we present a new, more inclusive bias measurement dataset, HolisticBias, which includes nearly 600 descriptor terms across 13 different demographic axes. HolisticBias was assembled in a participatory process including experts and community members with lived experience of these terms. These descriptors combine with a set of bias measurement templates to produce over 450,000 unique sentence prompts, which we use to explore, identify, and reduce novel forms of bias in several generative models. 
We demonstrate that HolisticBias is effective at measuring previously undetectable biases in token likelihoods from language models, as well as in an offensiveness classifier. We will invite additions and amendments to the dataset, which we hope will serve as a basis for more easy-to-use and standardized methods for evaluating bias in NLP models.", + "author": "Eric Michael Smith; Melissa Hall; Melanie Kambadur; Eleonora Presani; Adina Williams", + "authorids": "/e/eric-michael-smith/; /m/melissa-hall/; /m/melanie-kambadur/; /e/eleonora-presani/; /a/adina-williams/", + "bibtex": "@inproceedings{smith-etal-2022-im,\n title = \"{\\textquotedblleft}{I}`m sorry to hear that{\\textquotedblright}: Finding New Biases in Language Models with a Holistic Descriptor Dataset\",\n author = \"Smith, Eric Michael and\n Hall, Melissa and\n Kambadur, Melanie and\n Presani, Eleonora and\n Williams, Adina\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.625/\",\n doi = \"10.18653/v1/2022.emnlp-main.625\",\n pages = \"9180--9211\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.625.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.625/", + "pdf_size": 3266930, + "gs_citation": 162, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=2061076546879875718&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 3, + "aff": "Meta AI; Meta AI; Meta AI; Meta AI; Meta AI", + "aff_domain": "fb.com;fb.com;fb.com;fb.com;fb.com", + "email": "fb.com;fb.com;fb.com;fb.com;fb.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Meta Platforms, Inc.", + "aff_unique_dep": "Meta AI", + 
"aff_unique_url": "https://meta.com", + "aff_unique_abbr": "Meta", + "aff_campus_unique_index": "", + "aff_campus_unique": "", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + }, + { + "id": "2022.emnlp-main.64", + "title": "\u201cWill You Find These Shortcuts?\u201d A Protocol for Evaluating the Faithfulness of Input Salience Methods for Text Classification", + "track": "main", + "status": "Main", + "award": false, + "abstract": "Feature attribution a.k.a. input salience methods which assign an importance score to a feature are abundant but may produce surprisingly different results for the same model on the same input. While differences are expected if disparate definitions of importance are assumed, most methods claim to provide faithful attributions and point at the features most relevant for a model\u2019s prediction. Existing work on faithfulness evaluation is not conclusive and does not provide a clear answer as to how different methods are to be compared.Focusing on text classification and the model debugging scenario, our main contribution is a protocol for faithfulness evaluation that makes use of partially synthetic data to obtain ground truth for feature importance ranking. Following the protocol, we do an in-depth analysis of four standard salience method classes on a range of datasets and lexical shortcuts for BERT and LSTM models. 
We demonstrate that some of the most popular method configurations provide poor results even for simple shortcuts while a method judged to be too simplistic works remarkably well for BERT.", + "author": "Jasmijn Bastings; Sebastian Ebert; Polina Zablotskaia; Anders Sandholm; Katja Filippova", + "authorids": "/j/jasmijn-bastings/; /s/sebastian-ebert/; /p/polina-zablotskaia/; /a/anders-sandholm/; /k/katja-filippova/", + "bibtex": "@inproceedings{bastings-etal-2022-will,\n title = \"{\\textquotedblleft}Will You Find These Shortcuts?{\\textquotedblright} A Protocol for Evaluating the Faithfulness of Input Salience Methods for Text Classification\",\n author = \"Bastings, Jasmijn and\n Ebert, Sebastian and\n Zablotskaia, Polina and\n Sandholm, Anders and\n Filippova, Katja\",\n editor = \"Goldberg, Yoav and\n Kozareva, Zornitsa and\n Zhang, Yue\",\n booktitle = \"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing\",\n month = dec,\n year = \"2022\",\n address = \"Abu Dhabi, United Arab Emirates\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2022.emnlp-main.64/\",\n doi = \"10.18653/v1/2022.emnlp-main.64\",\n pages = \"976--991\"\n}", + "pdf": "https://aclanthology.org/2022.emnlp-main.64.pdf", + "site": "https://aclanthology.org/2022.emnlp-main.64/", + "pdf_size": 428548, + "gs_citation": 69, + "gs_cited_by_link": "https://scholar.google.com/scholar?cites=6409788713413424919&as_sdt=2005&sciodt=0,5&hl=en", + "gs_version_total": 4, + "aff": "Google Research; Google Research; Google Research; Google Research; Google Research", + "aff_domain": "google.com;google.com;google.com;google.com;google.com", + "email": "google.com;google.com;google.com;google.com;google.com", + "github": "", + "project": "", + "author_num": 5, + "aff_unique_index": "0;0;0;0;0", + "aff_unique_norm": "Google", + "aff_unique_dep": "Google Research", + "aff_unique_url": "https://research.google", + "aff_unique_abbr": 
"Google Research", + "aff_campus_unique_index": "0;0;0;0;0", + "aff_campus_unique": "Mountain View", + "aff_country_unique_index": "0;0;0;0;0", + "aff_country_unique": "United States" + } +] \ No newline at end of file