| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[,],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[,]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[],:,:[]} |
| {:,:,:[ On the Move to Meaningful Internet …, 2019G Wohlgenannt - On the Move to Meaningful Internet Systems: OTM …snippet… First, we tested large-scale pre-trained language models trained with generalpurpose text corpora such as GoogleNews and the CommonCrawl, but as expected both performed badly on the legal dataset, for example the Common …urlhttp://books.google.de/books?hl=en&lr=lang_en&id=hm21DwAAQBAJ&oi=fnd&pg=PA164&dq=commoncrawl&ots=pdUzWNZpkR&sig=nBv58MNJuj5jkkROfHpNIYLbyTshttps://link.springer.com/chapter/10.1007/978-3-030-33246-4_10 |
| year2019titleExploiting knowledge graphs for entity-centric predictionauthorsS Jiang - 2018snippetPage 1. © 2018 Shan Jiang Page 2. EXPLOITING KNOWLEDGE GRAPHS FOR ENTITY-CENTRIC PREDICTION BY SHAN JIANG DISSERTATION Submitted in partial fulfillment of the requirements for the degree of Doctor of …urlhttps://www.ideals.illinois.edu/bitstream/handle/2142/102463/JIANG-DISSERTATION-2018.pdf?sequence=1 |
| year2019titleExploiting Temporal Relationships in Video Moment Localization with Natural LanguageauthorsS Zhang, J Su, J Luo - arXiv preprint arXiv:1908.03846, 2019snippet… extracted from VGG [24] fc7 layer, optical flow features are extracted from the penultimate layer [27] and the 300-d Glove feature [21] pretrained on Common Crawl (42 billion tokens) are used as the word embedding. The segment …urlhttps://arxiv.org/pdf/1908.03846 |
| year2019titleExploiting the Hierarchical Structure of a Thesaurus for Document ClassificationauthorsE Filtz, S Kirrane, A Polleres, G Wohlgenanntsnippet… First, we tested large-scale pre-trained language models trained with generalpropose text corpora such as GoogleNews and the CommonCrawl, but as ex- pected both performed badly on the legal dataset, for example the …urlhttps://aic.ai.wu.ac.at/~polleres/publications/filt-etal-2019COOPIS.pdf |
| year2019titleExplore FREDDYauthorsM Günther, M Thiele, W Lehner, Z Yanakiev - BTW 2019, 2019snippet… The configuration of the search function is defined in the sidebar (Figure 3b) just as in the query view. 4 Screencast on our FREDDY website https://wwwdb.inf.tu-dresden.de/research-projects/freddy/ 5 https://dblp.uni …urlhttps://dl.gi.de/bitstream/handle/20.500.12116/21558/E08-1.pdf?sequence=1&isAllowed=y |
| year2019titleExplore FREDDY: Fast Word Embeddings in Database SystemsauthorsM Günther, Z Yanakiev, M Thiele, W Lehnersnippet… The configuration of the search function is defined in the sidebar (Figure 3b) just as in the query view. 4 Screencast on our FREDDY website https://wwwdb.inf.tu-dresden.de/research-projects/freddy/ 5 https://dblp.uni …urlhttps://btw.informatik.uni-rostock.de/download/tagungsband/E08-1.pdf |
| year2019titleExploring Numeracy in Word EmbeddingsauthorsA Naik, A Ravichander, C Rose, E Hovy - Proceedings of the 57th Conference of the …, 2019snippet… FastText (Bojanowski et al., 2017): Extended Skipgram model representing words as character n-grams to incorporate sub-word information. We evaluate Wikipedia and Common Crawl variants. 3.1 Retrained Word Vectors …urlhttps://www.aclweb.org/anthology/P19-1329 |
| year2019titleExploring the Limits of Transfer Learning with a Unified Text-to-Text TransformerauthorsC Raffel, N Shazeer, A Roberts, K Lee, S Narang… - arXiv preprint arXiv …, 2019snippet… unsupervised pre-training for NLP is particularly attractive because unlabeled text data is available en masse thanks to the Internet – for example, the Common Crawl project2 produces about 20TB of text data extracted from web pages each month …urlhttps://arxiv.org/pdf/1910.10683 |
| year2019titleExtending Cross-Domain Knowledge Bases with Long Tail Entities using Web Table DataauthorsY Oulabi, C Bizer - genre, 2019snippet… In a second experiment, we apply the system to a large corpus of web tables extracted from the Common Crawl. This experiment allows us to get an overall im- pression of the potential of web tables for augmenting knowledge bases with long tail entities …urlhttps://www.uni-mannheim.de/media/Einrichtungen/dws/Files_Research/Web-based_Systems/pub/OulabiBizer-LongTailEntities-EDBT2019.pdf |
| year2019titleExtracting and Analyzing Context Information in User-Support Conversations on TwitterauthorsD Martens, W Maalej - arXiv preprint arXiv:1907.13395, 2019snippet… As the list of marketing names also includes common words (eg, 'five', 'go', or 'plus'), we used the natural language processing library spaCy [33] to remove words that appear in the vocabulary of the included …urlhttps://arxiv.org/pdf/1907.13395 |
| year2019titleExtracting Novel Facts from Tables for Knowledge Graph Completion (Extended version)authorsB Kruit, P Boncz, J Urbani - arXiv preprint arXiv:1907.00083, 2019snippet… The first one is the T2D dataset [23], which contains a subset of the WDC Web Tables Corpus – a set of tables extracted from the CommonCrawl web scrape6. We use the latest available version of this dataset (v2, released 2017/02). In …urlhttps://arxiv.org/pdf/1907.00083 |
| year2019titleExtracting Novel Facts from Tables for Knowledge Graph CompletionauthorsB Kruit, P Boncz, J Urbani - International Semantic Web Conference, 2019snippet… The first one is the T2D dataset [25], which contains a subset of the WDC Web Tables Corpus – a set of tables extracted from the CommonCrawl web scrape 2 . We use the latest available version of this dataset (v2, released 2017/02) …urlhttps://link.springer.com/chapter/10.1007/978-3-030-30793-6_21 |
| year2019titleFacebook AI's WAT19 Myanmar-English Translation Task Submission","authors":["PJ Chen, J Shen, M Le, V Chaudhary, A El-Kishky… - arXiv preprint arXiv …, 2019"],"snippet":"… For Myanmar language, we take five snapshots of the Commoncrawl dataset and combine them with the raw data from Buck et al. (2014) … The Myanmar monolingual data we collect from Commoncrawl contains text in both Unicode and Zawgyi encodings …","url":["https://arxiv.org/pdf/1910.06848"]} |
| {"year":"2019","title":"Facebook FAIR's WMT19 News Translation Task SubmissionauthorsN Ng, K Yee, A Baevski, M Ott, M Auli, S Edunov - arXiv preprint arXiv:1907.06616, 2019snippet… We train two language models LI and LN on Newscrawl and Commoncrawl respectively, then score every sentence s in Commoncrawl by HI(s)−HN (s). We select a cu- toff of 0.01, and use all sentences that score higher than …urlhttps://arxiv.org/pdf/1907.06616 |
| year2019titleFacilitating access to health web pages with different language complexity levelsauthorsM Alfano, B Lenzitti, D Taibi, M Helfert - 2019snippet… The Web Data Commons (WDC) (Meusel, 2014) contains all Microformat, Microdata and RDFa data extracted from the open repository of web crawl data named Common Crawl (CC)16 … 15 http://webdatacommons.org/ 16 http://commoncrawl.org …urlhttp://doras.dcu.ie/23104/1/ICT4AWE_2019_30_CR.pdf |
| year2019titleFast and Accurate Network Embeddings via Very Sparse Random ProjectionauthorsH Chen, SF Sultan, Y Tian, M Chen, S Skiena - arXiv preprint arXiv:1908.11512, 2019snippet… WWW-200K and WWW-10K [11]: these graphs are derived from the Web graph provided by Common Crawl, where the nodes are hostnames and the edges are the hyperlinks between these websites. For simplicity, we treat this graph as an undirected graph …urlhttps://arxiv.org/pdf/1908.11512 |
| year2019titleFaster Neural Network Training with Data EchoingauthorsD Choi, A Passos, CJ Shallue, GE Dahl - arXiv preprint arXiv:1907.05550, 2019snippet… 2http://commoncrawl.org/2017/07/june-2017-crawl-archive-now-available/ 3Each time a training example is read from disk, it counts as a fresh example. 420k steps for LM1B, 60k for Common Crawl, 110k for ImageNet, 150k for CIFAR-10, and 30k for COCO …urlhttps://arxiv.org/pdf/1907.05550 |
| year2019titleFastSV: A Distributed-Memory Connected Component Algorithm with Fast ConvergenceauthorsY Zhang, A Azad, Z Hu - arXiv preprint arXiv:1910.05971, 2019snippetPage 1. FastSV: A Distributed-Memory Connected Component Algorithm with Fast Convergence Yongzhe Zhang ∗ Ariful Azad † Zhenjiang Hu ‡ Abstract This paper presents a new distributed-memory algorithm called FastSV …urlhttps://arxiv.org/pdf/1910.05971 |
| year2019titleFastText-Based Intent Detection for Inflected LanguagesauthorsK Balodis, D Deksne - Information, 2019snippet… For the word embeddings released by Facebook, we used the ones trained on Wikipedia (https: //fasttext.cc/docs/en/pretrained-vectors.html) because the ones trained on Common Crawl (https: //fasttext.cc/docs/en/crawl-vectors.html) showed inferior results in our tests …urlhttps://www.mdpi.com/2078-2489/10/5/161/pdf |
| year2019titleFeature Engineering for Text RepresentationauthorsD Sarkar - Text Analytics with Python, 2019snippetIn the previous chapters, we saw how to understand, process, and wrangle text data. However, all machine learning or deep learning models are limited because they cannot understand text data directly...urlhttps://link.springer.com/chapter/10.1007/978-1-4842-4354-1_4 |
| year2019titleFeature-Dependent Confusion Matrices for Low-Resource NER Labeling with Noisy LabelsauthorsL Lange, MA Hedderich, D Klakow - arXiv preprint arXiv:1910.06061, 2019snippet… clustering. While the Brown clustering was trained on the relatively small Europarl corpus, k- Means clustering seems to benefit from the word embeddings trained on documents from the much larger common crawl. 7 Analysis …urlhttps://arxiv.org/pdf/1910.06061 |
| year2019titleFeature2Vec: Distributional semantic modelling of human property knowledgeauthorsS Derby, P Miller, B Devereux - arXiv preprint arXiv:1908.11439, 2019snippet… For our experiments, we make use of the pretrained GloVe embeddings (Pennington et al., 2014) provided in the Spacy1 package trained on the Common Crawl2. The GloVe model includes 685,000 tokens … 1https://spacy …urlhttps://arxiv.org/pdf/1908.11439 |
| year2019titleFeeling Anxious? Perceiving Anxiety in Tweets using Machine LearningauthorsD Gruda, S Hasan - Computers in Human Behavior, 2019snippet… tweets. Words-to-vectors mapping is based on the deep neural network learning GloVe (Pennington, Socher, & Manning, 2014) embedding space built from the Common Crawl Web Data (42 Billion tokens, 1.9M vocab). The …urlhttps://www.sciencedirect.com/science/article/pii/S0747563219301608 |
| year2019titleFIESTA: Fast IdEntification of State-of-The-Art models using adaptive bandit algorithmsauthorsHB Moss, A Moore, DS Leslie, P Rayson - arXiv preprint arXiv:1906.12230, 2019snippet… optimiser settings and the same regularisation. All words are lower cased and we use the same Glove common crawl 840B token 300 dimension word embedding (Pennington et al., 2014). We use variational (Gal and Ghahramani …urlhttps://arxiv.org/pdf/1906.12230 |
| year2019titleFigurative Usage Detection of Symptom Words to Improve Personal Health Mention DetectionauthorsA Iyer, A Joshi, S Karimi, R Sparks, C Paris - arXiv preprint arXiv:1906.05466, 2019snippet… The first four are a random initialisation, and three pre-trained embeddings. The pretrained embeddings are: (a) word2vec (Mikolov et al., 2013); (b) GloVe (trained on Common Crawl) (Pennington et al., 2014); and, (c) Numberbatch (Speer et al., 2017) …urlhttps://arxiv.org/pdf/1906.05466 |
| year2019titleFinding Generalizable Evidence by Learning to Convince Q&A ModelsauthorsE Perez, S Karamcheti, R Fergus, J Weston, D Kiela… - arXiv preprint arXiv …, 2019snippet… fastText We define a function BoWFT that computes the average bag-of-words representation of some text using fastText embeddings (Joulin et al., 2017). We use 300-dimensional fastText word vectors pretrained on Common Crawl …urlhttps://arxiv.org/pdf/1909.05863 |
| year2019titleFindings of the First Shared Task on Machine Translation RobustnessauthorsX Li, P Michel, A Anastasopoulos, Y Belinkov… - arXiv preprint arXiv …, 2019snippet… To explore effective approaches to leverage abundant out-of-domain parallel data. • To explore novel approaches to leverage abundant monolingual data on the Web (eg, tweets, Reddit comments, commoncrawl, etc.). • To …urlhttps://arxiv.org/pdf/1906.11943 |
| year2019titleFindings of the WMT 2019 Shared Task on Parallel Corpus Filtering for Low-Resource ConditionsauthorsP Koehn, F Guzmán, V Chaudhary, J Pino - Proceedings of the Fourth Conference on …, 2019snippet… Corpus Sentences Words Wikipedia Sinhala 155,946 4,695,602 Nepali 92,296 2,804,439 English 67,796,935 1,985,175,324 CommonCrawl Sinhala 5,178,491 110,270,445 Nepali 3,562,373 102,988,609 English 380,409,891 8,894,266,960 …urlhttps://www.aclweb.org/anthology/W19-5404 |
| year2019titleFlauBERT: Unsupervised Language Model Pre-training for FrenchauthorsH Le, L Vial, J Frej, V Segonne, M Coavoux… - arXiv preprint arXiv …, 2019snippet… Common Crawl).11 The data were collected from three main sources: (1) monolingual data for French provided in WMT19 shared tasks (Li et al., 2019, 4 sub-corpora); (2) French text corpora offered in the OPUS collection …urlhttps://arxiv.org/pdf/1912.05372 |
| year2019titleFrame Augmented Alternating Attention Network for Video Question AnsweringauthorsW Zhang, S Tang, Y Cao, S Pu, F Wu, Y Zhuang - IEEE Transactions on Multimedia, 2019snippetPage 1. 1520-9210 (c) 2019 IEEE. Personal use is permitted, but republication/ redistribution requires IEEE permission. See http://www.ieee.org/ publications_standards/publications/rights/index.html for more information. This …urlhttps://ieeexplore.ieee.org/abstract/document/8811730/ |
| year2019titleFrequency, acceptability, and selection: A case study of clause-embeddingauthorsAS White, K RawlinssnippetPage 1. Frequency, acceptability, and selection: A case study of clause-embedding Aaron Steven White University of Rochester aaron.white@rochester.edu Kyle Rawlins Johns Hopkins University kgr@jhu.edu Abstract We investigate …urlhttps://ling.auf.net/lingbuzz/004596/current.pdf |
| year2019titleFrom Legal to Technical Concept: Towards an Automated Classification of German Political Twitter Postings as Criminal OffensesauthorsF Zufall, T Horsmann, T Zeschsnippet… We use a bi-directional LSTM (Hochreiter and Schmidhuber, 1997) for classification.30 We use the 300-dimensional German pre-trained word embeddings provided by Grave et al. (2018), which are trained on the German common crawl …urlhttps://www.researchgate.net/profile/Frederike_Zufall/publication/331475806_From_Legal_to_Technical_Concept_Towards_an_Automated_Classification_of_German_Political_Twitter_Postings_as_Criminal_Offenses/links/5ccbe9b0a6fdcc4719838905/From-Legal-to-Technical-Concept-Towards-an-Automated-Classification-of-German-Political-Twitter-Postings-as-Criminal-Offenses.pdf |
| year2019titleFrontiersinpatternrecognitionandartificialintelligenceauthorsB Marleah, N Nicola, SC Yee - 2019snippet |
| year2019titleFrowning Frodo, Wincing Leia, and a Seriously Great Friendship: Learning to Classify Emotional Relationships of Fictional CharactersauthorsE Kim, R Klinger - arXiv preprint arXiv:1903.12453, 2019snippet… We obtain word vectors for the embedding layer from GloVe (pre-trained on Common Crawl, d = 300, Pennington et al., 2014) and initialize out- of-vocabulary terms with zeros (including the po- sition indicators). 4 Experiments Experimental Setting …urlhttps://arxiv.org/pdf/1903.12453 |
| year2019titleFusing Vector Space Models for Domain-Specific ApplicationsauthorsL Rettig, J Audiffren, P Cudré-Mauroux - arXiv preprint arXiv:1909.02307, 2019snippet… Despite the convenience they bring, using such readilyavailable, pre-trained models is often suboptimal in vertical applications [2], [3]; as these models are pre-trained on large, non-specific sources (eg, Wikipedia and the Common …urlhttps://arxiv.org/pdf/1909.02307 |
| year2019titleGating Mechanisms for Combining Character and Word-level Word Representations: An Empirical StudyauthorsJA Balazs, Y Matsuo - arXiv preprint arXiv:1904.05584, 2019snippetPage 1. Gating Mechanisms for Combining Character and Word-level Word Representations: An Empirical Study Jorge A. Balazs and Yutaka Matsuo Graduate School of Engineering The University of Tokyo {jorge, matsuo}@weblab.tu-tokyo.ac.jp Abstract …urlhttps://arxiv.org/pdf/1904.05584 |
| year2019titleGeneral Purpose Vector Representation for Swedish Documents: An application of Neural Language ModelsauthorsS Hedström - 2019snippetPage 1. General Purpose Vector Representation for Swedish Documents An application of Neural Language Models Simon Hedström Master's Thesis in Engineering Physics, Department of Physics, Umeå University, 2019 Page …","url":["https://umu.diva-portal.org/smash/get/diva2:1323994/FULLTEXT01.pdf"]} |
| {"year":"2019","title":"Generalizable prediction of academic performance from short texts on social media","authors":["I Smirnov - arXiv preprint arXiv:1912.00463, 2019"],"snippet":"… We obtained significantly better results with a model that used word-embeddings (see Methods). We also find that embeddings trained on the VK corpus outperform models trained on the Wikipedia and Common Crawl corpora (Table 1). 3 Page 4 …","url":["https://arxiv.org/pdf/1912.00463"]} |
| {"year":"2019","title":"Generalizing Question Answering System with Pre-trained Language Model Fine-tuning","authors":["D Su, Y Xu, GI Winata, P Xu, H Kim, Z Liu, P Fung - … of the 2nd Workshop on Machine …, 2019"],"snippet":"… (2009)) and Common Crawl (Buck et al., 1https://github.com/mrqa/MRQA-SharedTask-2019 2014) for pre-training … 2014. N-gram counts and language models from the common crawl. In LREC, volume 2, page 4. Citeseer …","url":["https://mrqa.github.io/assets/papers/63_Paper.pdf"]} |
| {"year":"2019","title":"Generating Black-Box Adversarial Examples for Text Classifiers Using a Deep Reinforced Model","authors":["P Vijayaraghavan, D Roy - arXiv preprint arXiv:1909.07873, 2019"],"snippet":"… These paraphrase datasets together contains text from various sources: Common Crawl, CzEng1.6, Europarl, News Commentary, Quora questions, and Twitter trending topic tweets. We do not use all the data for our pretraining …","url":["https://arxiv.org/pdf/1909.07873"]} |
| {"year":"2019","title":"Generating composite SQL queries from natural language","authors":["M De Groote - 2018"],"snippet":"… of the questions. We decided to use the Common Crawl embedding that is trained on 42 billion tokens, consists of a vocabulary of 1.9 million tokens and embeds these tokens in the 300-dimensional vector space5. All the words …","url":["https://lib.ugent.be/fulltxt/RUG01/002/494/903/RUG01-002494903_2018_0001_AC.pdf"]} |
| {"year":"2019","title":"Generating Language-Independent Neural Sentence Embeddings for Natural Language Classification Tasks","authors":["S Erhardt"],"snippet":"… [Rud17] At the time this thesis was written, there are Word Embeddings for more than 150 languages, trained on Common Crawl1 and Wikipedia, available. [Rud17] 1An open repository of web crawl data that can be …","url":["https://www.social.in.tum.de/fileadmin/w00bwc/www/Gerhard_Hagerer/thesis.pdf"]} |
| {"year":"2019","title":"Generic Web Content Extraction with Open-Source Software","authors":["A Barbaresi"],"snippet":"… Because of the vastly increasing variety of corpora, text types and use cases, it becomes more and more difficult to assess the usefulness and appropriateness of certain web texts 1https://commoncrawl.org for given research objectives …","url":["https://corpora.linguistik.uni-erlangen.de/data/konvens/proceedings/papers/kaleidoskop/camera_ready_barbaresi.pdf"]} |
| {"year":"2019","title":"Geo-spatial text-mining from Twitter–a feature space analysis with a view toward building classification in urban regions","authors":["M Häberle, M Werner, XX Zhu - European Journal of Remote Sensing, 2019"],"snippet":"Skip to Main Content …","url":["https://www.tandfonline.com/doi/full/10.1080/22797254.2019.1586451"]} |
| {"year":"2019","title":"Ghmerti at SemEval-2019 Task 6: A Deep Word-and Character-based Approach to Offensive Language Identification","authors":["E Doostmohammadi, H Sameti, A Saffar - … of the 13th International Workshop on …, 2019"],"snippet":"… The indices include 256 of the most common characters, plus 0 for padding and 1 for un- known characters. 2. xw which is the embeddings of the words in the input tweet based on FastText's 600Btoken common crawl model (Mikolov et al., 2018) …urlhttps://www.aclweb.org/anthology/S19-2110 |
| year2019titleGLOSS: Generative Latent Optimization of Sentence RepresentationsauthorsSP Singh, A Fan, M Auli - arXiv preprint arXiv:1907.06385, 2019snippet… representations. This could be as simple as using a bag-of-words averaging of Glove (Pennington et al., 2014) word embeddings trained on a corpus such as CommonCrawl, which we re- fer to as Glove-BoW. Methods such …urlhttps://arxiv.org/pdf/1907.06385 |
| year2019titleGraphVite: A High-Performance CPU-GPU Hybrid System for Node EmbeddingauthorsZ Zhu, S Xu, M Qu, J Tang - arXiv preprint arXiv:1903.00757, 2019snippetPage 1. GraphVite: A High-Performance CPU-GPU Hybrid System for Node Embedding Zhaocheng Zhu Mila - Québec AI Institute Université de Montréal zhaocheng.zhu@ umontreal.ca Shizhen Xu Tsinghua University xsz12@mails.tsinghua.edu.cn …urlhttps://arxiv.org/pdf/1903.00757 |
| year2019titleGreen AIauthorsR Schwartz, J Dodge, NA Smith, O Etzioni - arXiv preprint arXiv:1907.10597, 2019snippet… For example, the June 2019 Common Crawl contains 242 TB of uncompressed data,12 so even simple filtering to extract usable text is difficult … 11https://opensource.google.com/ projects/open-images-dataset 12http://commoncrawl.org/2019/07 …urlhttps://arxiv.org/pdf/1907.10597 |
| year2019titleGrounded Response Generation Task at DSTC7authorsM Galley, C Brockett, X Gao, J Gao, B Dolansnippet… Turn 4 still pretty incredible , but quite a bit different that 10,000 meters . Table 1: Sample of the DSTC7 Sentence Generation data, which combines Reddit data (Turns 1-4) along with documents (extracted from Common Crawl) discussed in the conversations …urlhttp://workshop.colips.org/dstc7/papers/DSTC7_Task_2_overview_paper.pdf |
| year2019titleHappy Together: Learning and Understanding Appraisal From Natural LanguageauthorsA Rajendran, C Zhang, M Abdul-Mageedsnippet… language models (ULMFiT). Exploiting Simple GloVe Embeddings For the embedding layer, we obtain the 300-dimensional embedding vector for tokens using GloVe's Common Crawl pre-trained model [13]. GloVe embeddings …","url":["https://mageed.sites.olt.ubc.ca/files/2019/01/AffCon_aaai2019_happyDB.pdf"]} |
| {"year":"2019","title":"HARE: a Flexible Highlighting Annotator for Ranking and Exploration","authors":["D Newman-Griffis, E Fosler-Lussier - arXiv preprint arXiv:1908.11302, 2019"],"snippet":"… ated three commonly used benchmark embedding sets: word2vec skipgram (Mikolov et al., 2013) using GoogleNews,6 FastText skipgram with subword information on WikiNews,7 and GloVe (Pennington et al., 2014) on 840 …","url":["https://arxiv.org/pdf/1908.11302"]} |
| {"year":"2019","title":"HATEMINER at SemEval-2019 Task 5: Hate speech detection against Immigrants and Women in Twitter using a Multinomial Naive Bayes Classifier","authors":["N Chakravartula - Proceedings of the 13th International Workshop on …, 2019"],"snippet":"… Word Embeddings: Glove840B - common crawl, GloveTwitter27B - twitter crawl (Pennington et al., 2014) and fasttext - common crawl (Mikolov et al., 2018) pre-trained word embeddings are used to analyze their impact on the classification …","url":["https://www.aclweb.org/anthology/S19-2071"]} |
| {"year":"2019","title":"HealthSuggestions: moving beyond the beta version","authors":["PMP dos Santos - 2019"],"snippet":"Page 1. FACULDADE DE ENGENHARIA DA UNIVERSIDADE DO PORTO Health Suggestions: Moving Beyond the Beta Version Paulo Miguel Pereira dos Santos Master in Informatics and Computing Engineering Supervisor …","url":["https://repositorio-aberto.up.pt/bitstream/10216/121948/2/347008.2.pdf"]} |
| {"year":"2019","title":"Hierarchical Meta-Embeddings for Code-Switching Named Entity Recognition","authors":["GI Winata, Z Lin, J Shin, Z Liu, P Fung - arXiv preprint arXiv:1909.08504, 2019"],"snippet":"… We use FastText word embeddings trained from Common Crawl and Wikipedia (Grave et al., 2018) for English (es), Spanish (es), including four Romance languages: Catalan (ca), Portuguese (pt), French (fr), Italian …","url":["https://arxiv.org/pdf/1909.08504"]} |
| {"year":"2019","title":"High Quality ELMo Embeddings for Seven Less-Resourced Languages","authors":["M Ulčar, M Robnik-Šikonja - arXiv preprint arXiv:1911.10049, 2019"],"snippet":"… They used 20-million-words data randomly sampled from the raw text released by the CoNLL 2017 Shared Task - Automatically Annotated Raw Texts and Word Embeddings (Ginter et al., 2017), which is a combination of Wikipedia dump and common crawl …","url":["https://arxiv.org/pdf/1911.10049"]} |
| {"year":"2019","title":"Hitachi at MRP 2019: Unified Encoder-to-Biaffine Network for Cross-Framework Meaning Representation Parsing","authors":["Y Koreeda, G Morio, T Morishita, H Ozaki, K Yanai - arXiv preprint arXiv:1910.01299, 2019"],"snippet":"… Named entity label Named entity (NE) recognition is applied to the input text (see Section 7.1). GloVe We use 300-dimensional GloVe (Pennington et al., 2014) pretrained on Common Crawl2 which are kept fixed during the training …","url":["https://arxiv.org/pdf/1910.01299"]} |
| {"year":"2019","title":"HMM, Is This Ethical? Predicting the Ethics of Reddit Life Protips","authors":["M Coots, P Lu, L Wang"],"snippet":"… large corpus of text. GloVe representations have been trained on several large datasets that are publicly available, including corpuses from Wikipedia, Gigaword, Twitter, and Common Crawl [4]. 3. Task Definition Our problem is …","url":["https://madisoncoots.com/files/ethics.pdf"]} |
| {"year":"2019","title":"How Decoding Strategies Affect the Verifiability of Generated Text","authors":["L Massarelli, F Petroni, A Piktus, M Ott, T Rocktäschel… - arXiv preprint arXiv …, 2019"],"snippet":"… consisting of roughly 3 Billion Words; (iv) CC- NEWS, a de-duplicated subset of the English portion of the CommonCrawl news dataset (Nagel, 2016; Bakhtin et al., 2019; Liu et al., 2019a), which totals around 16 Billion words …","url":["https://arxiv.org/pdf/1911.03587"]} |
| {"year":"2019","title":"How to Ask Better Questions? A Large-Scale Multi-Domain Dataset for Rewriting Ill-Formed Questions","authors":["Z Chu, M Chen, J Chen, M Wang, K Gimpel, M Faruqui… - arXiv preprint arXiv …, 2019"],"snippet":"… and En↔Fr. The English-German translation models are trained on WMT datasets, including News Commentary 13, Europarl v7, and Common Crawl, and evaluated on newstest2013 for early stopping. On the newstest2013 …","url":["https://arxiv.org/pdf/1911.09247"]} |
| {"year":"2019","title":"How Well Do Embedding Models Capture Non-compositionality? A View from Multiword Expressions","authors":["N Nandakumar, T Baldwin, B Salehi - Proceedings of the 3rd Workshop on Evaluating …, 2019"],"snippet":"… It tokenises text at the character level. fastText We used the 300-dimensional fastText model pre-trained on Common Crawl and Wikipedia using CBOW (fastTextpre), as well as one trained over the same Wikipedia corpus4 us- ing skip-gram (fastText) …","url":["https://www.aclweb.org/anthology/W19-2004"]} |
| {"year":"2019","title":"Hybrid Rule-Based Model for Phishing URLs Detection","authors":["KS Adewole, AG Akintola, SA Salihu, N Faruk… - International Conference for …, 2019","N Faruk, RG Jimoh - … International Conference, iCETiC 2019, London, UK …, 2019"],"snippet":"… 1. From this figure, data collected from different servers such as Yahoo, Alexa, Common Crawl, PhishTank and OpenPhish are preprocessed in order to extract meaningful features that can be used for categorizing phishing websites from legitimate ones …","url":["http://books.google.de/books?hl=en&lr=lang_en&id=QF6mDwAAQBAJ&oi=fnd&pg=PA119&dq=commoncrawl&ots=T7vreYeKah&sig=sO3M90XucnzXO7OeF6horBncwb4","https://link.springer.com/chapter/10.1007/978-3-030-23943-5_9"]} |
| {"year":"2019","title":"Hybrid Words Representation for Airlines Sentiment Analysis","authors":["U Naseem, SK Khan, I Razzak, IA Hameed"],"snippet":"… GloVe uses ratios of co-occurrence probabilities. It is favourable to concatenate ELMo embeddings with traditional word embeddings. In this work, we have used pre-trained GloVe embedding (trained on 840 billion token from common crawl) of 300 dimensions …","url":["https://www.researchgate.net/profile/Ibrahim_Hameed/publication/336579383_Hybrid_Words_Representation_for_Airlines_Sentiment_Analysis/links/5da6e53892851caa1ba6f8c6/Hybrid-Words-Representation-for-Airlines-Sentiment-Analysis.pdf"]} |
| {"year":"2019","title":"Hyper: Distributed Cloud Processing for Large-Scale Deep Learning Tasks","authors":["D Buniatyan - arXiv preprint arXiv:1910.07172, 2019"],"snippet":"… [4] MinIO high performance object storage server compatible with Amazon S3 API. https://github.com/minio/minio, 2018. [Online; accessed 31- May-2019]. [5] Common Crawl Dataset. https://commoncrawl.org, 2019. [Online; accessed 31-May-2019] …","url":["https://arxiv.org/pdf/1910.07172"]} |
| {"year":"2019","title":"Hyperparameter Tuning for Deep Learning in Natural Language Processing","authors":["A Aghaebrahimian, M Cieliebak - 2019"],"snippet":"… on the Common Crawl, one on 42 and the other on 840 billion tokens), FastText (Bojanowski et al., 2016), dependency based (Levy and Goldberg, 2014), and ELMo (Peters et al., 2018). As shown in Ta- ble 1, the Glove …","url":["http://ceur-ws.org/Vol-2458/paper5.pdf"]} |
| {"year":"2019","title":"Identification Of Bot Accounts In Twitter Using 2D CNNs On User-generated Contents","authors":["M Polignano, MG de Pinto, P Lops, G Semeraro - 2019"],"snippet":"… FastTextEmb)8: 300 dimensionality vectors, composed by a vocabulary of 2 million words and n-grams of the words, case sensitive and obtained from 600 billion of tokens trained on data crawled from generic Internet web pages by Common Crawl nonprofit organization; …","url":["https://www.researchgate.net/profile/Marco_Polignano/publication/334636395_Identification_Of_Bot_Accounts_In_Twitter_Using_2D_CNNs_On_User-generated_Contents/links/5d373c10a6fdcc370a59e892/Identification-Of-Bot-Accounts-In-Twitter-Using-2D-CNNs-On-User-generated-Contents.pdf"]} |
| {"year":"2019","title":"Identification of Good and Bad News on Twitter","authors":["P Aggarwal, A Aker"],"snippet":"… We use tf-idf representation for each vocabulary term. 5.2.3 Embeddings Finally, we also use fasttext based embedding (Mikolov et al., 2018) vectors which are trained on common crawl having 600 billion tokens. 5.3 Classifiers …","url":["https://www.researchgate.net/profile/Ahmet_Aker3/publication/334825190_Identification_of_Good_and_Bad_News_on_Twitter/links/5d42c34992851cd04697548a/Identification-of-Good-and-Bad-News-on-Twitter.pdf"]} |
| {"year":"2019","title":"Identifying and Addressing Structural Inequalities in the Representativeness of Geographic Technologies","authors":["IL Johnson - 2019"],"snippet":"… knowledge graphs (Wikipedia and Google [289]), word embeddings (Wikipedia, Twitter, and Common Crawl in GloVe embeddings [238]), object detection (Instagram hashtags and Facebook [292])—and adding …","url":["http://search.proquest.com/openview/dccae6679751f41f283b33f555947aa8/1?pq-origsite=gscholar&cbl=18750&diss=y"]} |
| {"year":"2019","title":"Identifying transfer models for machine learning tasks","authors":["P Watson, B Bhattacharjee, NC CODELLA… - US Patent App. 15/982,622, 2019"],"snippet":"US20190354850A1 - Identifying transfer models for machine learning tasks - Google Patents. Identifying transfer models for machine learning tasks. Download PDF Info. Publication number US20190354850A1. US20190354850A1 …","url":["https://patents.google.com/patent/US20190354850A1/en"]} |
| {"year":"2019","title":"Idiap Abstract Text Summarization System for German Text Summarization Task","authors":["S Parida, P Motlicek - 2019"],"snippet":"… The experiments performed over 1http://opennmt.net/OpenNMT-py/ Summarization.html 2https://www.swisstext.org/ 3http://commoncrawl.org/ these datasets are described in the Section 4 (de- noted as S1 experimental …","url":["http://ceur-ws.org/Vol-2458/paper9.pdf"]} |
| {"year":"2019","title":"IIT Varanasi at HASOC 2019: Hate Speech and Offensive Content Identification in Indo-European Languages","authors":["A Mishra, S Pal - Proceedings of the 11th annual meeting of the Forum …"],"snippet":"… embedding. One of the pretrained glove embeddings is based on the common crawl which represents each word in the dimension of 300, and the other one is based on Twitter data which represents each word in the dimension of 200 …","url":["http://irlab.daiict.ac.in/~Parth/T3-22.pdf"]} |
| {"year":"2019","title":"IIT-BHU at CIQ 2019: Classification of Insincere Questions","authors":["A Mishra, S Pal"],"snippet":"… Different versions of glove pre-trained em- bedding exist; however, we use embedding trained of dimension 300 on common crawl using 840B tokens and 2.2M vocabulary3. We generated random embedding of dimension 300 for out of vocabulary words …","url":["http://irlab.daiict.ac.in/~Parth/T5-4.pdf"]} |
| {"year":"2019","title":"Impact of Debiasing Word Embeddings on Information Retrieval","authors":["E Gerritse - 2019"],"snippet":"… Bolukbasi et al. [1] show that there is a high correlation in bias in Word2Vec trained on Google News and Glove trained on the common crawl, so we still cannot infer whether the method or the dataset is more important for creating the bias …","url":["http://www.emmagerritse.com/pdfs/FDIA_2019_paper.pdf"]} |
| {"year":"2019","title":"Improved Quality Estimation of Machine Translation with Pre-trained Language Representation","authors":["G Miao, H Di, J Xu, Z Yang, Y Chen, K Ouchi - CCF International Conference on …, 2019"],"snippet":"… The former is mainly obtained from the open news datasets of the WMT17 and WMT18 MT evaluation tasks, including five data sets: Europarl v7, Europarl v12, Europarl v13, Common Crawl corpus, and Rapid corpus of EU press releases …","url":["https://link.springer.com/chapter/10.1007/978-3-030-32233-5_32"]} |
| {"year":"2019","title":"Improving Conditioning in Context-Aware Sequence to Sequence Models","authors":["X Wang, J Weston, M Auli, Y Jernite - arXiv preprint arXiv:1911.09728, 2019"],"snippet":"… 2019) for LFQA. The dataset consists of 272,000 complex questions and answer pairs, along with supporting documents created by gathering and concatenating passages from CommonCrawl web pages which are relevant to the question …","url":["https://arxiv.org/pdf/1911.09728"]} |
| {"year":"2019","title":"Improving Grammatical Error Correction via Pre-Training a Copy-Augmented Architecture with Unlabeled Data","authors":["W Zhao, L Wang, K Shen, R Jia, J Liu - arXiv preprint arXiv:1903.00138, 2019"],"snippet":"… We do not use reranking when evaluating the CoNLL-2014 data sets. But we rerank the top 12 hypothesizes us- ing the language model trained on Common Crawl (Junczys-Dowmunt and Grundkiewicz, 2016) for …","url":["https://arxiv.org/pdf/1903.00138"]} |
| {"year":"2019","title":"Improving Implicit Stance Classification in Tweets Using Word and Sentence Embeddings","authors":["R Schaefer, M Stede - Joint German/Austrian Conference on Artificial …, 2019"],"snippet":"… combinations. 4.2 fastText Embeddings. We use pre-trained 300-dimensional fastText [11] word vectors that have been trained on Wikipedia and Common Crawl data. For training, an extension of the CBOW model has been used …","url":["https://link.springer.com/chapter/10.1007/978-3-030-30179-8_26"]} |
| {"year":"2019","title":"Improving Named Entity Recognition with Commonsense Knowledge Pre-training","authors":["G Dekhili, NT Le, F Sadat - Pacific Rim Knowledge Acquisition Workshop, 2019"],"snippet":"… which is the concatenation of ConceptNet PPMI embeddings with Word2Vec embeddings trained on 100 billion words of Google News using skip-grams with negative sampling [14] and GloVe 1.2 embeddings trained on 840 billion words of the Common Crawl [16] …","url":["https://link.springer.com/chapter/10.1007/978-3-030-30639-7_2"]} |
| {"year":"2019","title":"Improving Neural Machine Translation of Subtitles with Finetun-ing","authors":["S Reinsperger - 2019"],"snippet":"… 3 Results 53 3.1 ParallelCorpora . . . . . 54 3.1.1 Europarl. . . . . 54 3.1.2 Common Crawl . . . . . 54 3.1.3 NewsCommentary . . . . . 56 3.1.4 Subtitles …","url":["http://www.simonrsp.com/masterthesis.pdf"]} |
| {"year":"2019","title":"Improving Neural Machine Translation Robustness via Data Augmentation: Beyond Back Translation","authors":["Z Li, L Specia - arXiv preprint arXiv:1910.03009, 2019"],"snippet":"… 3.1 Corpora We used all parallel corpora from the WMT19 Robustness Task on Fr↔En. For out-of-domain training, we used the WMT15 Fr↔En News Translation Task data, including Europarl v7, Common Crawl, UN, News Commentary v10, and Gigaword Corpora …","url":["https://arxiv.org/pdf/1910.03009"]} |
| {"year":"2019","title":"Improving Neural Machine Translation with Pre-trained Representation","authors":["R Weng, H Yu, S Huang, W Luo, J Chen - arXiv preprint arXiv:1908.07688, 2019"],"snippet":"… We use newstest2015 (NST15) as our validation set, and newstest2016 (NST16) as test sets 4. We use 40 million monolingual sentences from WMT-16 Common Crawl data-set … We use 5 million monolingual sentences …","url":["https://arxiv.org/pdf/1908.07688"]} |
| {"year":"2019","title":"Improving orienteering-based tourist trip planning with social sensing","authors":["F Persia, G Pilato, M Ge, P Bolzoni, D D'Auria… - Future Generation …, 2019"],"snippet":"… This is a popular technique in machine learning for uncovering subsymbolic meanings, such as word analogies. We utilized a pre-trained word vector encoding for Italian provided by fastText [32], which was trained on Common Crawl and Wikipedia …","url":["https://www.sciencedirect.com/science/article/pii/S0167739X19303929"]} |
| {"year":"2019","title":"Improving Quality Estimation of Machine Translation by Using Pre-trained Language Representation","authors":["G Miao, H Di, J Xu, Z Yang, Y Chen, K Ouchi - China Conference on Machine …, 2019","Y Chen, K Ouchi - Machine Translation: 15th China Conference, CCMT …, 2019"],"snippet":"… Metrics We first train the bilingual expert model [9] with large-scale parallel corpus released for the WMT17/WMT18 News Machine Translation Task, which mainly consists of five data sets, including Europarl v7, Europarl v12 …","url":["http://books.google.de/books?hl=en&lr=lang_en&id=WuK_DwAAQBAJ&oi=fnd&pg=PA11&dq=commoncrawl&ots=XTi4UL5q8i&sig=lCeqF4TBBuqQrg4EE0rN09FZeVs","https://link.springer.com/chapter/10.1007/978-981-15-1721-1_2"]} |
| {"year":"2019","title":"Improving Question Answering over Incomplete KBs with Knowledge-Aware Reader","authors":["W Xiong, M Yu, S Chang, X Guo, WY Wang - arXiv preprint arXiv:1905.07098, 2019"],"snippet":"… Page 6. A Implementation Details Throughout our experiments, we use the 300-dimension GloVe embeddings trained on the Common Crawl corpus. The hidden dimension of LSTM and the dimension of entity embeddings are both 100 …","url":["https://arxiv.org/pdf/1905.07098"]} |
| {"year":"2019","title":"In-call virtual assistant","authors":["R Raanani, R Levy, MY Breakstone - US Patent App. 16/165,566, 2019"],"snippet":"… At the same time, natural language processing (NLP) approaches to both topic modeling and world-knowledge modeling, have become much more efficient due to the availability of large, freely accessible natural language corpora (eg, CommonCrawl), as well as freely …","url":["https://patentimages.storage.googleapis.com/b2/cd/2c/a7fa39e3002b4f/US20190057698A1.pdf"]} |
| {"year":"2019","title":"Incendiary News Detection","authors":["EB Coban, E Filatova - 2019"],"snippet":"… features. We run classification experiments for unigrams, and combination of uniand bi-grams. 10https://www.nltk.org/ 11http://scikit-learn.org 12https://github.com/ otuncelli/turkish-stemmer-python 13http://commoncrawl.org/ For …","url":["https://pdfs.semanticscholar.org/8c78/f9da879fc5936ef84dc7128db691d7042fef.pdf"]} |
| {"year":"2019","title":"Incorporating Domain Knowledge into Natural Language Inference on Clinical Texts","authors":["M Lu, Y Fang, F Yan, M Li - IEEE Access, 2019"],"snippet":"… two domain-specific corpus: • GloVe[CC]: GloVe embeddings [21], trained on Common Crawl. • fastText[BioASQ]: fastText embeddings [22], trained on PubMed abstracts from the BioASQ challenge [23]. • fastText[MIMIC-III]: fastText …","url":["https://ieeexplore.ieee.org/iel7/6287639/6514899/08701433.pdf"]} |
| {"year":"2019","title":"Incorporating Syntactic Knowledge in Neural Quality Estimation for Machine Translation","authors":["N Ye, Y Wang, D Cai - China Conference on Machine Translation, 2019"],"snippet":"… One is the large-scale bilingual dataset for training the feature extraction module. It comes from the parallel corpus of WMT machine translation task, including Europarl v7, Common Crawl corpus, News Commentary v11 and so on …","url":["https://link.springer.com/chapter/10.1007/978-981-15-1721-1_3"]} |
| {"year":"2019","title":"Inducing Relational Knowledge from BERT","authors":["Z Bouraoui, J Camacho-Collados, S Schockaert - arXiv preprint arXiv:1911.12753, 2019"],"snippet":"… As static word embeddings for the baselines, we will use the Skip-gram word vectors that were pre-trained from the 100B words Google News data set6 (SG-GN) and GloVe word vectors which were pre-trained from the …","url":["https://arxiv.org/pdf/1911.12753"]} |
| {"year":"2019","title":"Inducing Schema. org Markup from Natural Language Context","authors":["GK Shahi, D Nandini, S Kumari - Kalpa Publications in Computing, 2019"],"snippet":"… extension, in 2012 another data hub called Web Data Commons [5] came up with structured data extracted from the Common Crawl … 5http:// commoncrawl.org/ 6http://webdatacommons.org/ 7The WARC file format …","url":["https://easychair.org/publications/download/DXGr"]} |
| {"year":"2019","title":"Inferring Concept Hierarchies from Text Corpora via Hyperbolic Embeddings","authors":["M Le, S Roller, L Papaxanthos, D Kiela, M Nickel - arXiv preprint arXiv:1902.00913, 2019"],"snippet":"Page 1. Inferring Concept Hierarchies from Text Corpora via Hyperbolic Embeddings Matt Le1 and Stephen Roller1 and Laetitia Papaxanthos2 Douwe Kiela1 and Maximilian Nickel1 1Facebook AI Research, New York …","url":["https://arxiv.org/pdf/1902.00913"]} |
| {"year":"2019","title":"Information extraction","authors":["S Razniewski"],"snippet":"… 8 Page 9. Taxi [Panchenko et al., 2016] 1. Crawl domain-specific text corpora in addition to WP, Commoncrawl 2. Candidate hypernymy extraction 1. Via substrings • “biomedical science” isA “science” • “microbiology” isA “biology” • “toast with bacon” isA “toast” …","url":["https://www.mpi-inf.mpg.de/fileadmin/inf/d5/teaching/ws19-20_ie/5_Taxonomy_induction_coreference_disambiguation.pdf"]} |
| {"year":"2019","title":"InriaFBK Drawing Attention to Offensive Language at Germeval2019","authors":["M Corazza, S Menini, E Cabrio, S Tonelli, S Villata…"],"snippet":"… This is the main reason why we chose to use FastText embeddings (Bojanowski et al., 2016), pretrained on Common Crawl and Wikipedia 3. 4.3 Recurrent model We develop a simple recurrent neural network model and use it for all subtasks …","url":["https://corpora.linguistik.uni-erlangen.de/data/konvens/proceedings/papers/germeval/Germeval_Task_2_2019_paper_1.INRIA.pdf"]} |
| {"year":"2019","title":"Integrating Grammatical Features into CNN Model for Emotion Classification","authors":["AC Le - 2018 5th NAFOSTED Conference on Information and …, 2018"],"snippet":"… a sentence s = 11 In this study we used the vector set GloVe [16], it is pretrained word vectors for Common Crawl (glove.42B.300d) with 300 dimensions for word embeddings to use for English data. For Vietnamese emotion …","url":["https://ieeexplore.ieee.org/abstract/document/8606875/"]} |
| {"year":"2019","title":"Integrating UMLS for Early Detection of Sings of Anorexia","authors":["FM Plaza-del-Arco, P López-Úbeda, MC Dıaz-Galiano… - 2019"],"snippet":"… Specifically, we use Page 6. the available pre-trained statistical models for English ”en core web md” wich version is 1.2.0. It is composed of 685k keys, 20k unique vectors (300 dimensions) and it was trained on OntoNotes …","url":["http://www.dei.unipd.it/~ferro/CLEF-WN-Drafts/CLEF2019/paper_76.pdf"]} |
| {"year":"2019","title":"Integrating word embeddings and document topics with deep learning in a video classification framework","authors":["Z Kastrati, AS Imran, A Kurti - Pattern Recognition Letters, 2019"],"snippet":"… GloVe contains word embeddings for a vocabulary of 400K words trained on 42 billion words from Wikipedia pages and newswire, and fastText includes word embeddings for a vocabulary of 2 million words trained on 600 billion tokens from Common Crawl …","url":["https://www.sciencedirect.com/science/article/pii/S0167865519302326"]} |
| {"year":"2019","title":"Intelligent sentiment analysis approach using edge computing‐based deep learning technique","authors":["H Sankar, V Subramaniyaswamy, V Vijayakumar… - Software: Practice and Experience"],"snippet":"… Word2Vec, 300d, 3 Million, 100 Billion. Common Crawl, 300d, 42 Billion, 1.9 Million. Common Crawl, 300d, 840 Billion, 2.2 Million. The main drawback of unsupervised word embedding learning is that it does not hold the sentiment …","url":["https://onlinelibrary.wiley.com/doi/abs/10.1002/spe.2687"]} |
| {"year":"2019","title":"Interactive Language Learning by Question Answering","authors":["X Yuan, MA Cote, J Fu, Z Lin, C Pal, Y Bengio… - arXiv preprint arXiv …, 2019"],"snippet":"Page 1. Interactive Language Learning by Question Answering Xingdi Yuan♥∗ Marc-Alexandre Côté♥∗ Jie Fu♣♠ Zhouhan Lin♦♠ Christopher Pal♣♠ Yoshua Bengio♦♠ Adam Trischler♥ ♥Microsoft Research, Montréal ♣Polytechnique …","url":["https://arxiv.org/pdf/1908.10909"]} |
| {"year":"2019","title":"Interactive Machine Comprehension with Information Seeking Agents","authors":["X Yuan, J Fu, MA Cote, Y Tay, C Pal, A Trischler - arXiv preprint arXiv:1908.10449, 2019"],"snippet":"… Word embeddings are initialized by the 300-dimension fastText (Mikolov et al. 2018) vectors trained on Common Crawl (600B tokens), and are fixed during training. Character embeddings are initialized by 200-dimension random vectors …","url":["https://arxiv.org/pdf/1908.10449"]} |
| {"year":"2019","title":"Internet of Things Anomaly Detection using Multivariate Analysis","authors":["S Ezekiel, AA Alshehri, L Pearlstein, XW Wu, A Lutz - The 3rd ICICPE 2019 Conference …"],"snippet":"… Our model uses the GloVe (Pennington et al., 2014) 300-dimensional vectors trained on the Common Crawl corpus with 42B tokens as word level features, as this resulted in the best performance in preliminary experiments …","url":["http://icicpe.org/wp-content/uploads/2019/12/ICICPE-2019-vol.31.pdf#page=90"]} |
| {"year":"2019","title":"Iot-based call assistant device","authors":["R Raanani, R Levy, MY Breakstone - US Patent App. 16/168,663, 2019"],"snippet":"… At the same time, natural language processing (NLP) approaches to both topic modeling and world-knowledge modeling, have become much more efficient due to the availability of large, freely accessible natural language corpora (eg, CommonCrawl), as well as freely …","url":["https://patentimages.storage.googleapis.com/c3/a1/97/799532a8db7406/US20190057079A1.pdf"]} |
| {"year":"2019","title":"Iterative Keyword Optimization","authors":["A Elyashar, M Reuben, R Puzis"],"snippet":"… The model was trained on Common Crawl 4 and Wikipedia 5 using the fastText library 6. We used Euclidean as the distance measure … 4 http://commoncrawl.org/ 5 https://www.wikipedia.org/ 6 https://fasttext …","url":["http://sbp-brims.org/2019/proceedings/papers/working_papers/Elyashar.pdf"]} |
| {"year":"2019","title":"JHU 2019 Robustness Task System Description","authors":["M Post, K Duh - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… the best million lines each of CommonCrawl, Gigaword, and the UN corpus; and • the MTNT training data. Data sizes are indicated in Table 1. dataset segments words Europarl 2.0m 50.2m News Commentary 200k 4.4m …","url":["https://www.aclweb.org/anthology/W19-5366"]} |
| {"year":"2019","title":"Johns Hopkins University Submission for WMT News Translation Task","authors":["K Marchisio, YK Lal, P Koehn - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… sampled bitext (x2). ParaCrawl1 and Common Crawl2 are filtered similarly, and added to form the training set for the final models. We … Crawl. ParaCrawl and Common Crawl were combined into a single corpus before filtering …","url":["https://www.aclweb.org/anthology/W19-5329"]} |
| {"year":"2019","title":"Joint Training for Neural Machine Translation","authors":["Y Cheng"],"snippet":"Page 1. Springer Recognizing Theses Outstanding Ph.D. Research Yong Cheng Joint Neural Translation Training Machine for Page 2. Springer Theses Recognizing Outstanding Ph.D. Research Page 3. Aims and Scope The …","url":["http://books.google.de/books?hl=en&lr=lang_en&id=KIOrDwAAQBAJ&oi=fnd&pg=PR5&dq=commoncrawl&ots=vy1Stpb4X-&sig=1d6kjXbtaE3McDjxvY7O9-JJQOk"]} |
| {"year":"2019","title":"Jointly Learning to Align and Translate with Transformer Models","authors":["S Garg, S Peitz, U Nallasamy, M Paulik - arXiv preprint arXiv:1909.02074, 2019","SGSPU Nallasamy, M Paulik"],"snippet":"… by Vilar et al. (2006). We use all available bilingual data (Europarl v7, Common Crawl corpus, News Commentary v13 and Rapid corpus of EU press releases) excluding the ParalCrawl corpus. We remove sentences longer …","url":["https://arxiv.org/pdf/1909.02074","https://www.researchgate.net/profile/Stephan_Peitz/publication/336996532_Jointly_Learning_to_Align_and_Translate_with_Transformer_Models/links/5ec41124458515626cb813b1/Jointly-Learning-to-Align-and-Translate-with-Transformer-Models.pdf"]} |
| {"year":"2019","title":"JParaCrawl: A Large Scale Web-Based English-Japanese Parallel Corpus","authors":["M Morishita, J Suzuki, M Nagata - arXiv preprint arXiv:1911.10668, 2019"],"snippet":"… To select the candidate domains, we first identified the language of all the Common Crawl text data by CLD26 and counted how much … Since the crawled data stored on Common Crawl may not contain the entire website or might …","url":["https://arxiv.org/pdf/1911.10668"]} |
| {"year":"2019","title":"KaWAT: A Word Analogy Task Dataset for Indonesian","authors":["K Kurniawan - arXiv preprint arXiv:1906.09912, 2019"],"snippet":"… We used fastText pretrained embeddings introduced in (Bojanowskietal.,2017) and (Grave et al., 2018), which have been trained on Indonesian Wikipedia and Indonesian Wikipedia plus Common Crawl data respectively. We …","url":["https://arxiv.org/pdf/1906.09912"]} |
| {"year":"2019","title":"Keyphrase Extraction from Scholarly Articles as Sequence Labeling using Contextualized Embeddings","authors":["D Sahrawat, D Mahata, M Kulkarni, H Zhang… - arXiv preprint arXiv …, 2019"],"snippet":"… and OpenAI GPT-2 (small, medium). As a baseline, we also use 300 dimensional fixed embeddings from Glove2, Word2Vec3, and FastText4 (common-crawl, wiki-news). We also compare the proposed architecture against …","url":["https://arxiv.org/pdf/1910.08840"]} |
| {"year":"2019","title":"KiloGrams: Very Large N-Grams for Malware Classification","authors":["E Raff, W Fleming, R Zak, H Anderson, B Finlayson… - arXiv preprint arXiv …, 2019"],"snippet":"… A ccuracy s = 1 s = ⌈n/4⌉ Figure 1: Balanced Accuracy results (y-axis) on the Public PDF dataset as we increase then-gram size (x-axis, log-scale), and alter the hashing stride s. Using a hashing-stride retains more …","url":["https://arxiv.org/pdf/1908.00200"]} |
| {"year":"2019","title":"KIT's Submission to the IWSLT 2019 Shared Task on Text Translation","authors":["F Schneider, A Waibel"],"snippet":"… We made use of all allowed data, which is broken down in table 1. The allowed parallel data from WMT consists of Commoncrawl, CzEng (which makes up the vast majority of the parallel training data), Europarl, news commentrary and paracrawl …","url":["https://zenodo.eu/record/3525496/files/IWSLT2019_paper_30.pdf"]} |
| {"year":"2019","title":"Knowledge empowered prominent aspect extraction from product reviews","authors":["Z Luo, S Huang, KQ Zhu - Information Processing & Management, 2019"],"snippet":"Skip to main content …","url":["https://www.sciencedirect.com/science/article/pii/S0306457318305193"]} |
| {"year":"2019","title":"Knowledge Graph-Driven Conversational Agents","authors":["J Bockhorst, D Conathan, G Fung"],"snippet":"… We use a CNN with max pooling and pretrained Glove embeddings trained on the Common Crawl 840B dataset [6] [7]. By applying our CNN classifier as a straightforward 1-of-k document classification task, we are able to achieve …","url":["https://kr2ml.github.io/2019/papers/KR2ML_2019_paper_42.pdf"]} |
| {"year":"2019","title":"Knowledge-based Conversational Search","authors":["S Vakulenko - arXiv preprint arXiv:1912.06859, 2019"],"snippet":"Page 1. arXiv:1912.06859v1 [cs.IR] 14 Dec 2019 Page 2. Page 3. Knowledge-based Conversational Search DISSERTATION submitted in partial fulfillment of the requirements for the degree of Doktorin der Technischen Wissenschaften by Svitlana Vakulenko, MSc …","url":["https://arxiv.org/pdf/1912.06859"]} |
| {"year":"2019","title":"Kyoto University participation to the WMT 2019 news shared task","authors":["F Cromieres, S Kurohashi - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… Page 2. 164 3 Data preprocessing 3.1 Data used For bilingual data, we used the provided corpora: europarl (≈ 1.7M sentence pairs), common crawl(≈ 620k sentence pairs) and newscommentary (≈ 255k sentence pairs). We did not use the paracrawl corpus …","url":["https://www.aclweb.org/anthology/W19-5312"]} |
| {"year":"2019","title":"Language Modelling Makes Sense: Propagating Representations through WordNet for Full-Coverage Word Sense Disambiguation","authors":["D Loureiro, A Jorge - arXiv preprint arXiv:1906.10007, 2019"],"snippet":"… tokens in the sentence. We choose fastText (Bojanowski et al., 2017) embeddings (pretrained on CommonCrawl), which are biased towards morphology, and avoid Out-of-Vocabulary issues as explained in §2.1. We use fastText …","url":["https://arxiv.org/pdf/1906.10007"]} |
| {"year":"2019","title":"Language Models are Unsupervised Multitask Learners","authors":["A Radford, J Wu, R Child, D Luan, D Amodei…"],"snippet":"… A promising source of diverse and nearly unlimited text is web scrapes such as Common Crawl … Trinh & Le (2018) used Common Crawl in their work on commonsense reasoning but noted a large amount of documents “whose content are mostly unintelligible” …","url":["https://www.techbooky.com/wp-content/uploads/2019/02/Better-Language-Models-and-Their-Implications.pdf"]} |
| {"year":"2019","title":"Language Models with Pre-Trained (GloVe) Word Embeddings","authors":["L Rokach, B Shapira, V Makarenkov"],"snippet":"… Despite the huge size of the Common Crawl corpus, some words may not exist with the embeddings, so we set these words to random vectors, and use the same embeddings consistently if we encounter the same unseen word again in the text …","url":["https://deepai.org/publication/language-models-with-pre-trained-glove-word-embeddings"]} |
| {"year":"2019","title":"Large Memory Layers with Product Keys","authors":["G Lample, A Sablayrolles, MA Ranzato, L Denoyer… - arXiv preprint arXiv …, 2019","MA Ranzato, L Denoyer, H Jégou"],"snippet":"… Experiments Page 20. Dataset 20 ▶ Extracted from the public Common Crawl. ▶ 40 million English news articles in training set, 5000 in validation and test set each. ▶ Did not shuffle sentences, allowing the model to learn …","url":["https://arxiv.org/pdf/1907.05242","https://pdfs.semanticscholar.org/3a54/100803474df3b98e54a1693010d12c9718b5.pdf"]} |
| {"year":"2019","title":"Large Scale Linguistic Processing of Tweets to Understand Social Interactions among Speakers of Less Resourced Languages: The Basque Case","authors":["J Fernandez de Landa, R Agerri, I Alegria - Information, 2019"],"snippet":"… resourced languages such as Basque. However, FastText provides pre-trained models for many languages, including Basque [33] by using the common crawl data (http://commoncrawl.org). The Basque model they distribute …","url":["https://www.mdpi.com/2078-2489/10/6/212/pdf"]} |
| {"year":"2019","title":"Last-Mile TLS Interception: Analysis and Observation of the Non-Public HTTPS Ecosystem","authors":["XC de Carnavalet - 2019"],"snippet":"Page 1. Last-Mile TLS Interception: Analysis and Observation of the Non-Public HTTPS Ecosystem Xavier de Carné de Carnavalet A thesis in The Concordia Institute for Information Systems Engineering Presented …","url":["http://users.encs.concordia.ca/~mmannan/student-resources/Thesis-PhD-Carnavalet-2019.pdf"]} |
| {"year":"2019","title":"Latent Question Interpretation Through Parameter Adaptation","authors":["T Parshakova, F Rameau, A Serdega, I Kweon, DS Kim - IEEE/ACM Transactions on …, 2019"],"snippet":"… A. Implementation Details For the sake of reproducibility, we provide the technical details related to the implementation of our approach. First of all, the initial word embeddings are initialized with GloVe embeddings, which …","url":["https://www.researchgate.net/profile/Francois_Rameau/publication/334633405_Latent_Question_Interpretation_Through_Parameter_Adaptation/links/5d37e05ca6fdcc370a5a3a43/Latent-Question-Interpretation-Through-Parameter-Adaptation.pdf"]} |
| {"year":"2019","title":"Laying the foundations for benchmarking open data automatically: a method for surveying data portals from the whole web","authors":["A Sheffer Correa, F Soares Correa Da Silva - 20th Annual International Conference …, 2019"],"snippet":"… KEYWORDS Open Data, Common Crawl, CKAN, Socrata, ArcGIS, OpenDataSoft … Common Crawl conducts crawls once a month and persists all the content in Web Archive (WARC) file format to allow multibillion web page archives with hundreds of terabytes in size …","url":["https://dl.acm.org/citation.cfm?id=3325257"]} |
| {"year":"2019","title":"LCEval: Learned Composite Metric for Caption Evaluation","authors":["N Sharif, L White, M Bennamoun, W Liu, SAA Shah"],"snippet":"… Table 1: The details of pre-trained embeddings used in our experiments Name Source Dimensions Corpus Corpus Size Vocabulary Size GloVE 840B 300d [40] 300 Common Crawl 8.40E+11 2.20E+06 Word2vec Google 300d [34] …","url":["https://www.researchgate.net/profile/Naeha_Sharif2/publication/334760575_LCEval_Learned_Composite_Metric_for_Caption_Evaluation/links/5d429677a6fdcc370a715269/LCEval-Learned-Composite-Metric-for-Caption-Evaluation.pdf"]} |
| {"year":"2019","title":"Learning as the Unsupervised Alignment of Conceptual Systems","authors":["BD Roads, BC Love - arXiv preprint arXiv:1906.09012, 2019"],"snippet":"… We found that alignment correlations positively correlated with mapping accuracy across a variety of scenarios (Figure 3A-C). The three conceptual systems were derived from a Common Crawl text corpus (Pennington et …","url":["https://arxiv.org/pdf/1906.09012"]} |
| {"year":"2019","title":"Learning from Personal Longitudinal Dialog Data","authors":["C Welch, V Pérez-Rosas, JK Kummerfeld, R Mihalcea…"],"snippet":"… Message Embeddings: We also obtain word vector representations for each message using the GloVe Common Crawl pre-trained model.19 We chose this word embedding over other off-theshelf options because the Common …","url":["https://sentic.net/personal-longitudinal-dialog-data.pdf"]} |
| {"year":"2019","title":"Learning multilingual topics through aspect extraction from monolingual texts","authors":["J Huber, M Spiliopoulou - Proceedings of the Fifth International Workshop on …, 2019"],"snippet":"… Xu et al., 2018). It was trained on the CommonCrawl corpus, a general-purpose text corpus that includes text from several billion web pages; the GloVe embeddings were trained on 840 billion tokens. The GloVe set includes …","url":["http://www.aclweb.org/anthology/W19-0313"]} |
| {"year":"2019","title":"Learning Outside the Box: Discourse-level Features Improve Metaphor Identification","authors":["J Mu, H Yannakoudakis, E Shutova - arXiv preprint arXiv:1904.02246, 2019"],"snippet":"… To learn representations, we use several widelyused embedding methods:4 GloVe We use 300-dimensional pre-trained GloVe embeddings (Pennington et al., 2014) trained on the Common Crawl corpus as representations of a lemma and its arguments …","url":["https://arxiv.org/pdf/1904.02246"]} |
| {"year":"2019","title":"Learning Relational Fractals for Deep Knowledge Graph Embedding in Online Social Networks","authors":["J Zhang, L Tan, X Tao, D Wang, JJC Ying, X Wang - International Conference on Web …, 2019"],"snippet":"… Our twitter dataset was live streamed from a twitter API account and contains a maximum of 1675882 nodes and 160799842 links. The Google dataset was obtained from the repositories of common crawl and was sentilyzed from the stripped down WET file contents …","url":["https://link.springer.com/chapter/10.1007/978-3-030-34223-4_42"]} |
| {"year":"2019","title":"Learning to Generate Personalized Product Descriptions","authors":["G Elad, I Guy, K Radinsky, S Novgorodov, B Kimelfeld - 2019"],"snippet":"… For the title representation, we used fastText word embeddings2 pre-trained on Common Crawl and Wikipedia [25, 33], weighted based on each word's TF-IDF score [4].3 In addition, we included as features the participant's demo …","url":["http://www.kiraradinsky.com/files/Learning_to_Generate_Personalized_Product_Descriptions.pdf"]} |
| {"year":"2019","title":"Learning to Speak and Act in a Fantasy Text Adventure Game","authors":["J Urbanek, A Fan, S Karamcheti, S Jain, S Humeau… - arXiv preprint arXiv …, 2019","JUA Fan, SKSJS Humeau, EDT Rocktäschel…"],"snippet":"Page 1. Learning to Speak and Act in a Fantasy Text Adventure Game Jack Urbanek1 Angela Fan1,2 Siddharth Karamcheti1 Saachi Jain1 Samuel Humeau1 Emily Dinan1 Tim Rocktäschel1,3 Douwe Kiela1 Arthur Szlam1 Jason …","url":["https://arxiv.org/pdf/1903.03094","https://research.fb.com/wp-content/uploads/2019/11/Learning-to-Speak-and-Act-in-a-Fantasy-Text-Adventure-Game.pdf"]} |
| {"year":"2019","title":"Learning Word Ratings for Empathy and Distress from Document-Level User Responses","authors":["J Sedoc, S Buechel, Y Nachmany, A Buffone, L Ungar - arXiv preprint arXiv …, 2019"],"snippet":"… (2013) using 10-fold crossvalidation. For word embeddings we used off-the-shelf Fasttext subword embeddings (Mikolov et al., 2018).4 The embeddings are trained with subword information on Common Crawl (600B tokens) …","url":["https://arxiv.org/pdf/1912.01079"]} |
| {"year":"2019","title":"Leveraging Distributional and Relational Semantics for Knowledge Extraction from Textual Corpora","authors":["G ROSSIELLO, G SEMERARO, M DI CIANO - 2019"],"snippet":"Page 1. Page 2 …","url":["https://www.researchgate.net/profile/Gaetano_Rossiello/publication/333448156_Leveraging_Distributional_and_Relational_Semantics_for_Knowledge_Extraction_from_Textual_Corpora/links/5cee4fcca6fdcc18c8e9913b/Leveraging-Distributional-and-Relational-Semantics-for-Knowledge-Extraction-from-Textual-Corpora.pdf"]} |
| {"year":"2019","title":"Leveraging End-to-End Speech Recognition with Neural Architecture Search","authors":["A Baruwa, M Abisiga, I Gbadegesin, A Fakunle - arXiv preprint arXiv:1912.05946, 2019"],"snippet":"… We train a 3-gram, 5-gram and a 7-gram language model on common crawl 1. The relative performances are summarised in tables 1 and 2. Decoding is done by beam-searching for the output y that maximizes φ(c) given by …","url":["https://arxiv.org/pdf/1912.05946"]} |
| {"year":"2019","title":"Leveraging Hierarchical Representations for Preserving Privacy and Utility in Text","authors":["O Feyisetan, T Diethe, T Drake - arXiv preprint arXiv:1910.08917, 2019"],"snippet":"Page 1. Leveraging Hierarchical Representations for Preserving Privacy and Utility in Text Oluwaseyi Feyisetan Amazon sey@amazon.com Tom Diethe Amazon tdiethe@amazon.co.uk Thomas Drake Amazon draket@amazon.com …","url":["https://arxiv.org/pdf/1910.08917"]} |
| {"year":"2019","title":"Leveraging Pretrained Image Classifiers for Language-Based Segmentation","authors":["D Golub, R Martín-Martín, A El-Kishky, S Savarese - arXiv preprint arXiv:1911.00830, 2019"],"snippet":"… With Word2Vec we first embed the target labels l and the labels in the set of possible proxy labels in a shared vector space using 300-dimensional GloVe embeddings [29] trained on the Common Crawl 840B word corpus. For labels that contains multiple words …","url":["https://arxiv.org/pdf/1911.00830"]} |
| {"year":"2019","title":"Leveraging Unpaired Out-of-Domain Data for Image Captioning","authors":["X Chen, M Zhang, Z Wang, L Zuo, B Li, Y Yang - Pattern Recognition Letters, 2018"],"snippet":"Skip to main content …","url":["https://www.sciencedirect.com/science/article/pii/S0167865518309358"]} |
| {"year":"2019","title":"Leveraging Web Semantic Knowledge in Word Representation Learning","authors":["H Liu, L Fang, JG Lou, Z Li - 2019"],"snippet":"… We extract a large collection of semantic lists from the Common Crawl data7 using the patterns defined in Table 1 and filter out entries that do not exist in the vocabulary of the training data … 6http://dumps.wikimedia.org/enwiki/ 7http://commoncrawl.org/ Page 5 …","url":["https://www.aaai.org/Papers/AAAI/2019/AAAI-LiuHaoyan.142.pdf"]} |
| {"year":"2019","title":"Limsi-multisem at the ijcai semdeep-5 wic challenge: Context representations for word usage similarity estimation","authors":["AG Soler, M Apidianaki, A Allauzen - Proceedings of the 5th Workshop on Semantic …, 2019"],"snippet":"… Di- mensionality reduction is applied to a weighted average of the vectors of words in a sentence. Weighting is based on word frequency in Common Crawl. We use SIF in combination with 300- d GloVe vectors trained …","url":["https://www.aclweb.org/anthology/W19-5802"]} |
| {"year":"2019","title":"Lingua Custodia at WMT'19: Attempts to Control Terminology","authors":["F Burlot - arXiv preprint arXiv:1907.04618, 2019"],"snippet":"… to the decoder. Page 2. 2 Baseline The training parallel data provided for the task consisted of nearly 10M sentences, including Europarl (Koehn, 2005), Common-crawl, Newscommentary and Bicleaner07. The former was …","url":["https://arxiv.org/pdf/1907.04618"]} |
| {"year":"2019","title":"Linked Open Data Validity--A Technical Report from ISWS 2018","authors":["TA Ghor, E Agrawal, M Alam, O Alqawasmeh… - arXiv preprint arXiv …, 2019"],"snippet":"Page 1. Linked Open Data Validity A Technical Report from ISWS 2018 April 1, 2019 Bertinoro, Italy arXiv:1903.12554v1 [cs.DB] 26 Mar 2019 Page 2. Authors Main Editors Mehwish Alam, Semantic Technology Lab, ISTC-CNR …","url":["https://arxiv.org/pdf/1903.12554"]} |
| {"year":"2019","title":"Linking artificial and human neural representations of language","authors":["J Gauthier, R Levy - arXiv preprint arXiv:1910.01244, 2019"],"snippet":"… contrasts between the 384 sentences tested. 9We use publicly available GloVe vectors computed on Common Crawl, available in the spaCy toolkit as en vectors web lg. Page 6. 3 Results We first present the performance of …","url":["https://arxiv.org/pdf/1910.01244"]} |
| {"year":"2019","title":"LINSPECTOR: Multilingual Probing Tasks for Word Representations","authors":["GG Şahin, C Vania, I Kuznetsov, I Gurevych - arXiv preprint arXiv:1903.09442, 2019"],"snippet":"Page 1. LINSPECTOR Multilingual Probing Tasks for Word Representations Gözde Gül Sahin∗ UKP Lab / TU Darmstadt Clara Vania∗∗ ILCC / University of Edinburgh Ilia Kuznetsov UKP Lab / TU Darmstadt Iryna Gurevych UKP Lab / TU Darmstadt …","url":["https://arxiv.org/pdf/1903.09442"]} |
| {"year":"2019","title":"LIUM's Contributions to the WMT2019 News Translation Task: Data and Systems for German-French Language Pairs","authors":["F Bougares, J Wottawa, A Baillot, L Barrault, A Bardet - … 2: Shared Task Papers, Day 1 …, 2019"],"snippet":"… As it can be seen from tables 1 and 2, the effect of the cleaning step is more pronounced for the noisy parallel corpora (ie ParaCrawl and Common Crawl) … Page 3. 131 #lines #token FR #token DE europarl-v7 1.7M 45.9M 40.9 …","url":["https://www.aclweb.org/anthology/W19-5307"]} |
| {"year":"2019","title":"Local bow-tie structure of the web","authors":["Y Fujita, Y Kichikawa, Y Fujiwara, W Souma, H Iyetomi - Applied Network Science, 2019"],"snippet":"… This fact means that the absence of self-similarity between page level and host/domain levels. Meusel et al. (2014, 2015) investigated the publicly accessible crawl of the web gathered by the Common Crawl Foundation in 2012 (CC12) (Meusel et al. 2014; 2015) …","url":["https://link.springer.com/article/10.1007/s41109-019-0127-2"]} |
| {"year":"2019","title":"Logical Layout Analysis using Deep Learning","authors":["A Zulfiqar, A Ul-Hasan, F Shafait"],"snippet":"… of the text zones. GloVE provides 300 dimensional vectors, one vector for each word. We have used the one trained on common crawl having 840 billion tokens and vectors for a total of 2.2 million words. Since we also want …","url":["https://tukl.seecs.nust.edu.pk/members/projects/conference/Logical-Layout-Analysis-using-Deep-Learning.pdf"]} |
| {"year":"2019","title":"Longitudinal Analysis of Misuse of Bitcoin⋆","authors":["K Eldefrawy, A Gehani, A Matton"],"snippet":"… its labels). Seed data was used from previously published onion data sets, references to onions in a large collection of DNS resolver logs, and an open repository of (non-onion) web crawl data, called the Common Crawl. The …","url":["http://www.csl.sri.com/users/gehani/papers/ACNS-2019.Bitcoin_Study.pdf"]} |
| {"year":"2019","title":"Look Who's Talking: Inferring Speaker Attributes from Personal Longitudinal DialogauthorsC Welch, V Pérez-Rosas, JK Kummerfeld, R Mihalcea - arXiv preprint arXiv …, 2019snippet… The word embedding inputs to the context encoder are 300 dimensional. 8 Features Word Embeddings: We obtain word vector representations for each message using the GloVe Common Crawl pre-trained model [12]. We …urlhttps://arxiv.org/pdf/1904.11610 |
| year2019titleLow Resource Sequence Tagging with Weak LabelsauthorsE Simpson, J Pfeiffer, I Gurevychsnippet… For FAMULUS, we use 300-dimensional German fastText embeddings (Grave et al. 2018), and for NER and PICO we use 300-dimensional English GloVe 3 embeddings trained on 840 billion tokens from Common Crawl. To …urlhttps://public.ukp.informatik.tu-darmstadt.de/UKP_Webpage/publications/2020/2020_AAAI_SE_LowResourceSequence.pdf |
| year2019titleLow Supervision, Low Corpus size, Low Similarity! Challenges in cross-lingual alignment of word embeddings: An exploration of the limitations of cross-lingual word …authorsA Dyer - 2019snippetPage 1. Low Supervision, Low Corpus size, Low Similarity! Challenges in cross-lingual alignment of word embeddings An exploration of the limitations of cross-lingual word embedding alignment in truly low resource scenarios Andrew Dyer …urlhttp://www.diva-portal.org/smash/get/diva2:1365879/FULLTEXT01.pdf |
| year2019titleLSTM for Dialogue Breakdown Detection: Exploration of Different Model Types and Word EmbeddingsauthorsM Hendriksen, A Leeuwenberg, MF Moenssnippet… The words are uncased. GloVe Common Crawl … The results presented in the Table 2, allow to conclude that GloVe Common Crawl demonstrate the best performance, the GloVe Twitter being the second best, the word2vec Google News is the worst. Page 9 …urlhttp://workshop.colips.org/wochat/@iwsds2019/documents/dbdc4-mariya-hendriksen-etal.pdf |
| year2019titleLTL-UDE at SemEval-2019 Task 6: BERT and Two-Vote Classification for Categorizing OffensivenessauthorsP Aggarwal, T Horsmann, M Wojatzki, T Zesch - … of the 13th International Workshop on …, 2019snippet… word representations. The resulting posting vector is re-scaled into the range zero to one. We use the pre-trained embeddings provided by Mikolov et al. (2018), which are trained on the common crawl corpus. Classifiers We …urlhttps://www.aclweb.org/anthology/S19-2121 |
| year2019titleltl. uni-due at SemEval-2019 Task 5: Simple but Effective Lexico-Semantic Features for Detecting Hate Speech in TwitterauthorsH Zhang, M Wojatzki, T Horsmann, T Zesch - … of the 13th International Workshop on …, 2019snippet… of LSTMs and CNNs (LSTM + CNN). We initialize all setups with the 300-dimensional word embeddings provided by Mikolov et al. (2018), which were trained on the common crawl corpus. Furthermore, in all setups, we use …urlhttps://www.aclweb.org/anthology/S19-2078 |
| year2019titleMachine Reading of Clinical Notes for Automated ICD CodingauthorsM Morisio, S MalacrinosnippetPage 1. Master degree course in Computer Engineering Master Degree Thesis Machine Reading of Clinical Notes for Automated ICD Coding Supervisor Prof. Maurizio Morisio Candidate Stefano Malacrin`o Internship tutors …urlhttps://webthesis.biblio.polito.it/10958/1/tesi.pdf |
| year2019titleMachine Translation of Restaurant Reviews: New Corpus for Domain Adaptation and RobustnessauthorsA Bérard, I Calapodescu, M Dymetman, C Roux… - arXiv preprint arXiv …, 2019snippet… data, we built a new training corpus named UGC (User Generated Content), closer to our domain, by combining: Multi UN, OpenSubtitles, Wikipedia, Books, Tatoeba, TED talks, ParaCrawl11 and Gourmet12 (See Table 3) …urlhttps://arxiv.org/pdf/1910.14589 |
| year2019titleMapping languages and demographics with georeferenced corporaauthorsJ Dunn, B Adams - 2019snippet… To answer this question, we collect and analyze two large global-scale datasets: web-crawled data from the Common Crawl (16.65 billion words) and social media data from Twitter (4.14 billion words). This paper evaluates demographic-type informa …urlhttps://ir.canterbury.ac.nz/bitstream/handle/10092/17132/GeoComputation_19.pdf?sequence=2 |
| year2019titleMassive vs. Curated Word Embeddings for Low-Resourced Languages. The Case of Yor\\ub\\'a and Twi","authors":["JO Alabi, K Amponsah-Kaakyire, DI Adelani… - arXiv preprint arXiv …, 2019"],"snippet":"… The resource par excellence is Wikipedia2, an online encyclopedia currently available in 307 languages3. Other initiatives such as Common Crawl4 or the Jehovahs Witnesses site5 are also repositories for multilingual …","url":["https://arxiv.org/pdf/1912.02481"]} |
| {"year":"2019","title":"Massively multilingual transfer for NER","authors":["A Rahimi, Y Li, T Cohn - Proceedings of the 57th Conference of the Association …, 2019"],"snippet":"Page 1. Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 151–164 Florence, Italy, July 28 - August 2, 2019. c 2019 Association for Computational Linguistics 151 Massively Multilingual Transfer for NER …","url":["https://www.aclweb.org/anthology/P19-1015"]} |
| {"year":"2019","title":"MASTER UNIVERSITARIO EN INGENIERÍA DE TELECOMUNICACION","authors":["DB SANCHEZ - 2019"],"snippet":"Page 1. M´ASTER UNIVERSITARIO EN INGENIERÍA DE TELECOMUNICACI´ON TRABAJO FIN DE M´ASTER DESING AND DEVELOPMENT OF A HATE SPEECH DETECTOR IN SOCIAL NETWORKS BASED ON DEEP LEARNING TECHNOLOGIES …","url":["http://oa.upm.es/55618/1/TESIS_MASTER_DIEGO_BENITO_SANCHEZ_2019.pdf"]} |
| {"year":"2019","title":"Measuring Gender Bias in Word Embeddings across Domains and Discovering New Gender Bias Word Categories","authors":["K Chaloner, A Maldonado - Proceedings of the First Workshop on Gender Bias in …, 2019"],"snippet":"… WEAT's authors applied these tests to the publicly-available GloVe embeddings trained on the English-language “Common Crawl” corpus (Pennington et al., 2014) as well as the Skip-Gram (word2vec) embeddings …","url":["https://www.aclweb.org/anthology/W19-3804"]} |
| {"year":"2019","title":"Medical Word Embeddings for Spanish: Development and Evaluation","authors":["F Soares, M Villegas, A Gonzalez-Agirre, M Krallinger… - Proceedings of the 2nd …, 2019"],"snippet":"… makes available Word2Vec models pre-trained on about 100 billion words from Google News corpus in English1. Regarding other languages, on FastText website2 one can download pre-trained embeddings for 157 lan …","url":["https://www.aclweb.org/anthology/W19-1916"]} |
| {"year":"2019","title":"Meemi: Finding the Middle Ground in Cross-lingual Word Embeddings","authors":["Y Doval, J Camacho-Collados, L Espinosa-Anke… - arXiv preprint arXiv …, 2019"],"snippet":"… 10 Page 11. the WaCky project [23], containing 2 and 0.8 billion words, respectively.6 For Finnish and Russian, we use their corresponding Common Crawl monolingual corpora from the Machine Translation of News Shared Task 20167, composed of …","url":["https://arxiv.org/pdf/1910.07221"]} |
| {"year":"2019","title":"Membership Inference Attacks on Sequence-to-Sequence Models","authors":["S Hisamoto, M Post, K Duh - arXiv preprint arXiv:1904.05506, 2019"],"snippet":"… For example, e (d) i with d = l1 and i = 1 might refer to the first sentence in the Europarl subcorpus, while e (d) i with d = l2 and i = 1 might refer to the first sentence in the CommonCrawl subcorpus … CommonCrawl 5,000 5,000 2,389,123 2,379,123 N/A …","url":["https://arxiv.org/pdf/1904.05506"]} |
| {"year":"2019","title":"Metaphor Interpretation Using Word Embeddings","authors":["K Bar, N Dershowitz, L Dankin"],"snippet":"… relatively large corpus. Specifically, we use DepCC,1 a dependency-parsed “web-scale corpus” based on CommonCrawl.2 There are 365 million documents in the corpus, comprising about 252B tokens. Among other preprocessing …","url":["https://pdfs.semanticscholar.org/2033/a3f7b8b53ea277a811ac450139422793b08b.pdf"]} |
| {"year":"2019","title":"Methods and apparatus for detection of malicious documents using machine learning","authors":["JD Saxe, R HARANG - US Patent App. 16/257,749, 2019"],"snippet":"… decision tree, etc.). The memory 120 includes one or more datasets 112 (eg, a VirusTotal dataset and/or a Common Crawl dataset, as described in further detail below) and one or more training models 124. The malware detection …","url":["https://patentimages.storage.googleapis.com/fa/f8/d7/5843fb31e01d95/US20190236273A1.pdf"]} |
| {"year":"2019","title":"Microsoft Research Asia's Systems for WMT19","authors":["Y Xia, X Tan, F Tian, F Gao, W Chen, Y Fan, L Gong…"],"snippet":"… Dataset We concatenate “Europarl v9”, “News Commentary v14”, “Common Crawl corpus” and “Document-split Rapid corpus” as the ba- sic bilingual … We merge the “commoncrawl”, “europarl-v7” and part of “de-fr.bicleaner07” …","url":["http://www.statmt.org/wmt19/pdf/WMT0048.pdf"]} |
| {"year":"2019","title":"MIDAS: A Dialog Act Annotation Scheme for Open Domain Human Machine Spoken Conversations","authors":["D Yu, Z Yu - arXiv preprint arXiv:1908.10023, 2019"],"snippet":"… An example can be seen in the last USER2 utterance in Table 1. Word em- beddings are pre-trained with fastText (Mikolov et al., 2018) using Common Crawl. We evaluate the segmentation model on human labeled 2K human utterances of collected data …","url":["https://arxiv.org/pdf/1908.10023"]} |
| {"year":"2019","title":"Mining Discourse Markers for Unsupervised Sentence Representation Learning","authors":["D Sileo, T Van-De-Cruys, C Pradel, P Muller - arXiv preprint arXiv:1903.11850, 2019"],"snippet":"… We use sentences from the Depcc corpus (Panchenko et al., 2017), which consists of En- glish texts harvested from commoncrawl web data … Word embeddings are fixed GloVe embeddings with 300 dimensions, trained …","url":["https://arxiv.org/pdf/1903.11850"]} |
| {"year":"2019","title":"Mix-review: Alleviate Forgetting in the Pretrain-Finetune Framework for Neural Language Generation Models","authors":["T He, J Liu, K Cho, M Ott, B Liu, J Glass, F Peng - arXiv preprint arXiv:1910.07117, 2019"],"snippet":"… For pre-training, we use the large-scale CCNEWS data (Bakhtin et al., 2019) which is a de- duplicated subset of the English portion of the CommonCrawl news data-set1. The dataset contains news articles published worldwide …","url":["https://arxiv.org/pdf/1910.07117"]} |
| {"year":"2019","title":"MLT-DFKI at CLEF eHealth 2019: Multi-label Classification of ICD-10 Codes with BERT","authors":["S Amin, G Neumann, K Dunfield, A Vechkaeva… - CLEF (Working Notes), 2019"],"snippet":"… have stronger linguistic signals to classify the classes where German models make mistakes [1]. The baseline proved to be a strong one, with the highest precision of all and outperforming HAN and CNN models, for both German …","url":["https://www.researchgate.net/profile/Saadullah_Amin2/publication/335681972_MLT-DFKI_at_CLEF_eHealth_2019_Multi-label_Classification_of_ICD-10_Codes_with_BERT/links/5d742a00299bf1cb809043cd/MLT-DFKI-at-CLEF-eHealth-2019-Multi-label-Classification-of-ICD-10-Codes-with-BERT.pdf"]} |
| {"year":"2019","title":"Mono-and Cross-lingual Semantic Word Similarity for Urdu Language","authors":["G Fatima - 2019"],"snippet":"Page 1. I Monoand Cross-lingual Semantic Word Similarity for Urdu Language By Ghazeefa Fatima CIIT/FA17-RCS-016/LHR MS Thesis In Computer Science COMSATS University Islamabad Lahore Campus Page …","url":["http://dspace.cuilahore.edu.pk/xmlui/bitstream/handle/123456789/1571/Thesis.pdf?sequence=1"]} |
| {"year":"2019","title":"MoRTy: Unsupervised Learning of Task-specialized Word Embeddings by Autoencoding","authors":["N Rethmeier, B Plank - Proceedings of the 4th Workshop on Representation …, 2019"],"snippet":"… Hence, we demonstrate the method's application for single-task, multi-task, small, medium and web-scale (common crawl) corpus-size settings (Section 4). Learning to scale-up by pretraining on more (un-)labeled data is both: (a) not always possible in low-resource …","url":["https://www.aclweb.org/anthology/W19-4307"]} |
| {"year":"2019","title":"Multi-class Document Classification Using Improved Word Embeddings","authors":["BA Rabut, AC Fajardo, RP Medina - Proceedings of the 2nd International Conference …, 2019"],"snippet":"… ACM ISBN 978-1-4503-7290-9/19/10…$15.00 https://doi.org/10.1145/3366650.3366661 42 Page 2. Common crawl)[7]. The pre-trained word embedding vectors serve as input in the classification algorithm for evaluation and prediction …","url":["https://dl.acm.org/citation.cfm?id=3366661"]} |
| {"year":"2019","title":"Multi-domain Dialogue State Tracking as Dynamic Knowledge Graph Enhanced Question Answering","authors":["L Zhou, K Small - arXiv preprint arXiv:1911.06192, 2019"],"snippet":"… For experiments with GloVe embeddings, we use GloVe embeddings pre-trained on Common Crawl dataset.3 The dimension of GloVe embeddings is 300, and the dimension of character-level embeddings is 100, such that Dw = 400 …","url":["https://arxiv.org/pdf/1911.06192"]} |
| {"year":"2019","title":"Multi-Granular Text Encoding for Self-Explaining Categorization","authors":["Z Wang, Y Zhang, M Yu, W Zhang, L Pan, L Song, K Xu… - arXiv preprint arXiv …, 2019"],"snippet":"… for each set. Hyperparameters We use the 300-dimensional GloVe word vectors pre-trained from the 840B Common Crawl corpus (Pennington et al., 2014), and set the hidden size as 100 for node embeddings. We apply dropout …","url":["https://arxiv.org/pdf/1907.08532"]} |
| {"year":"2019","title":"Multi-Hop Paragraph Retrieval for Open-Domain Question Answering","authors":["Y Feldman, R El-Yaniv - arXiv preprint arXiv:1906.06606, 2019"],"snippet":"Page 1. Multi-Hop Paragraph Retrieval for Open-Domain Question Answering Yair Feldman and Ran El-Yaniv Department of Computer Science Technion – Israel Institute of Technology Haifa, Israel {yairf11, rani}@cs.technion.ac.il Abstract …","url":["https://arxiv.org/pdf/1906.06606"]} |
| {"year":"2019","title":"Multi-Resolution Models for Learning Multilevel Abstract Representation with Application to Information Retrieval","authors":["T Cakaloglu - 2019"],"snippet":"Page 1. MULTI-RESOLUTION MODELS FOR LEARNING MULTILEVEL ABSTRACT REPRESENTATION WITH APPLICATION TO INFORMATION RETRIEVAL A Dissertation Submitted to the Graduate School University of Arkansas at Little Rock …","url":["http://search.proquest.com/openview/4bce4201a6d742c4c771e08b17dec0cb/1?pq-origsite=gscholar&cbl=18750&diss=y"]} |
| {"year":"2019","title":"Multi-Team: A Multi-attention, Multi-decoder Approach to Morphological Analysis.","authors":["A Ustün, R van der Goot, G Bouma, G van Noord"],"snippet":"… 2018). For FastText, two sets of pre-trained embeddings are available: one is trained only on Wikipedia (Bojanowski et al., 2017), whereas the newer versions are also trained on CommonCrawl (Grave et al., 2018). Whenever …","url":["http://www.robvandergoot.com/doc/sigmorphon2019.pdf"]} |
| {"year":"2019","title":"Multilingual Culture-Independent Word Analogy Datasets","authors":["M Ulčar, M Robnik-Šikonja - arXiv preprint arXiv:1911.10038, 2019"],"snippet":"… language is shown in the Table 6. Table 6: Percentage of constructed analogy pairs covered by the first 200,000 word vectors from common crawl fastText embeddings. Language Coverage (%) Croatian 81.67 English 97.05 …","url":["https://arxiv.org/pdf/1911.10038"]} |
| {"year":"2019","title":"Multilingual Fake News Detection with Satire","authors":["G Guibon, L Ermakova, H Seffih, A Firsov…"],"snippet":"… Detection of Deception. Non-verbal communication (2014), https://nvc.uvt.nl/pdf/7.pdf 6. Bevendorff, J., Stein, B., Hagen, M., Potthast, M.: Elastic chatnoir: Search engine for the clueweb and the common crawl. In: Pasi, G., Piwowarski …","url":["https://www.researchgate.net/profile/Guillaume_Le_Noe-Bienvenu/publication/332803834_Multilingual_Fake_News_Detection_with_Satire_on_Vaccination_Topic/links/5d24917a458515c11c1f8724/Multilingual-Fake-News-Detection-with-Satire-on-Vaccination-Topic.pdf"]} |
| {"year":"2019","title":"Multilingual is not enough: BERT for Finnish","authors":["A Virtanen, J Kanerva, R Ilo, J Luoma, J Luotolahti… - arXiv preprint arXiv …, 2019"],"snippet":"… Second, we selected texts from the Common Crawl project6 by running aa map-reduce language detection job on the plain text material from Common Crawl. These sources were supplemented with plain text extracted …","url":["https://arxiv.org/pdf/1912.07076"]} |
| {"year":"2019","title":"Multilingual Sentence-Level Bias Detection in Wikipedia","authors":["D Aleksandrova, F Lareau, PA Ménard"],"snippet":"… Same BOW n-gram size and BOW size and value type as SGD. 5Available for 157 languages, pretrained on Common Crawl and Wikipedia (Grave et al., 2018) https:// fasttext.cc/docs/en/crawl-vectors.html 6Version 0.21.2 of the sklearn toolkit …","url":["https://www.researchgate.net/profile/Desislava_Aleksandrova/publication/334612399_Multilingual_Sentence-Level_Bias_Detection_in_Wikipedia/links/5d5bd0c392851c37636bfdf2/Multilingual-Sentence-Level-Bias-Detection-in-Wikipedia.pdf"]} |
| {"year":"2019","title":"Multimodal deep networks for text and image-based document classification","authors":["N Audebert, C Herold, K Slimani, C Vidal - APIA"],"snippet":"… For both methods, we use the SpaCy small English model [33] to perform the tokenization and punctuation removal. Individual word embeddings are then inferred using FastText [29] pretrained on the Common Crawl dataset …","url":["https://www.irit.fr/pfia2019/wp-content/uploads/2019/07/Actes_CH_PFIA2019.pdf#page=14"]} |
| {"year":"2019","title":"Multimodal Machine Translation with Embedding Prediction","authors":["T Hirasawa, H Yamagishi, Y Matsumura, M Komachi - arXiv preprint arXiv …, 2019"],"snippet":"… model. “+ pretrained” models are initialized with pretrained embeddings. 2018). These word embeddings are trained on Wikipedia and Common Crawl using the CBOW algorithm, and the dimension is 300. The embedding …","url":["https://arxiv.org/pdf/1904.00639"]} |
| {"year":"2019","title":"Multimodal Sentiment Analysis Using Deep Learning","authors":["R Sharma, N Le Tan, F Sadat - 2018 17th IEEE International Conference on Machine …, 2018"],"snippet":"… For the CNN model we used pre-trained word embeddings (GloVe 840B.300d). This is a 300-dimensional word embedding trained on 840 billion tokens from the common crawl dataset. The maximum sequence length is 200 …","url":["https://ieeexplore.ieee.org/abstract/document/8614265/"]} |
| {"year":"2019","title":"Named entity recognition for Polish","authors":["M Marcińczuk, A Wawer - Poznan Studies in Contemporary Linguistics, 2019"],"snippet":"AbstractIn this article we discuss the current state-of-the-art for named entity recognition for Polish. We present publicly available resources and open-source tools for named entity recognition. The overview includes various …","url":["https://www.degruyter.com/view/j/psicl.2019.55.issue-2/psicl-2019-0010/psicl-2019-0010.xml"]} |
| {"year":"2019","title":"Named Entity Recognition for Social Media Text","authors":["Y Zhang - 2019"],"snippet":"… We use two different pre-trained word embeddings based on Common Crawl data, which contains 840 billion tokens and 2.2 million vocabulary and Twitter data which contains 2 billion tweets, 27 billion tokens, and 1.2 million vocabulary …","url":["https://uu.diva-portal.org/smash/get/diva2:1366031/FULLTEXT01.pdf"]} |
| {"year":"2019","title":"Named Entity Recognition Using Gazetteer of Hierarchical Entities","authors":["M Štravs, J Zupančič - … Conference on Industrial, Engineering and Other …, 2019"],"snippet":"… To summarize, the proposed entity recognition method was tested using two languages (Slovenian and English), six different distance measures, and two different vector embeddings from Wikipedia (Wiki WV) and Common Crawl (CC WV) …","url":["https://link.springer.com/chapter/10.1007/978-3-030-22999-3_65"]} |
| {"year":"2019","title":"Named-entity recognition in Czech historical texts: Using a CNN-BiLSTM neural network model","authors":["H Hubková - 2019"],"snippet":"… We also tried to work with published pretrained word embeddings of contemporary Czech words provided by fastText6. These were trained on more than 178 millions of tokens from Wikipedia and 13 billions tokens based on common crawl (Grave et al., 2018) …","url":["http://www.diva-portal.org/smash/get/diva2:1325355/FULLTEXT01.pdf"]} |
| {"year":"2019","title":"Natural Language Processing for Book Recommender Systems","authors":["H Alharthi - 2019"],"snippet":"Page 1. Natural Language Processing for Book Recommender Systems by Haifa Alharthi Thesis submitted in partial fulfillment of the requirements for the PhD degree in Computer Science School of Electrical Engineering and Computer Science Faculty of Engineering …","url":["https://www.ruor.uottawa.ca/bitstream/10393/39134/1/Alharthi_Haifa_2019_thesis.pdf"]} |
| {"year":"2019","title":"Natural language processing using context-specific word vectors","authors":["B McCann, C Xiong, R Socher - US Patent App. 15/982,841, 2018"],"snippet":"… in the second language. In some examples, training of an MT-LSTM of the encoder 310 uses fixed 300-dimensional word vectors, such as the CommonCrawl-840B GloVe model for English word vectors. These word vectors …","url":["https://patentimages.storage.googleapis.com/49/87/1a/0d4e316e8e4194/US20180373682A1.pdf"]} |
| {"year":"2019","title":"Naver Labs Europe's Systems for the WMT19 Machine Translation Robustness Task","authors":["A Bérard, I Calapodescu, C Roux - arXiv preprint arXiv:1907.06488, 2019"],"snippet":"… 3.1 Pre-processing CommonCrawl filtering We first spent efforts on filtering and cleaning the WMT data (in particular CommonCrawl) … We filtered CommonCrawl as follows: we trained a baseline FR→EN model on WMT without …","url":["https://arxiv.org/pdf/1907.06488"]} |
| {"year":"2019","title":"Nested Variational Autoencoder for Topic Modeling on Microtexts with Word Vectors","authors":["T Trinh, T Quan, T Mai - arXiv preprint arXiv:1905.00195, 2019"],"snippet":"Page 1. Noname manuscript No. (will be inserted by the editor) Nested Variational Autoencoder for Topic Modeling on Microtexts with Word Vectors Trung Trinh · Tho Quan · Trung Mai Received: date / Accepted: date Abstract …","url":["https://arxiv.org/pdf/1905.00195"]} |
| {"year":"2019","title":"NeuMorph: Neural Morphological Tagging for Low-Resource Languages—An Experimental Study for Indic Languages","authors":["A Chakrabarty, A Chaturvedi, U Garain - ACM Transactions on Asian and Low …, 2019"],"snippet":"Page 1. 16 NeuMorph: Neural Morphological Tagging for Low-Resource Languages— An Experimental Study for Indic Languages ABHISEK CHAKRABARTY, AKSHAY CHATURVEDI, and UTPAL GARAIN, Indian Statistical Institute, India …","url":["https://dl.acm.org/citation.cfm?id=3342354"]} |
| {"year":"2019","title":"Neural Conversation Recommendation with Online Interaction Modeling","authors":["X Zeng, J Li, L Wang, KF Wong"],"snippet":"Page 1. Neural Conversation Recommendation with Online Interaction Modeling Xingshan Zeng1,2, Jing Li3∗, Lu Wang4, Kam-Fai Wong1,2 1The Chinese University of Hong Kong, Hong Kong, China 2MoE Key Laboratory …","url":["https://www.ccs.neu.edu/home/luwang/papers/EMNLP2019_zeng_li_wang_wong.pdf"]} |
| {"year":"2019","title":"Neural Facet Detection on Medical Resources","authors":["T Steffek - 2019"],"snippet":"Page 1. Neural Facet Detection on Medical Resources Thomas Steffek April 2, 2019 Page 2. Page 3. Beuth Hochschule für Technik Fachbereich VI - Informatik und Medien Database Systems and Text-based Information Systems (DATEXIS) Bachelor's thesis …urlhttps://prof.beuth-hochschule.de/fileadmin/prof/aloeser/Bachelorarbeit_Thomas-Steffek_with-title-page-1.1.pdf |
| {"year":"2019","title":"Neural Feature Extraction for Contextual Emotion Detection","authors":["E Mohammadi, H Amini, L Kosseim"],"snippet":"… pretrained word embeddings. As the first word embedder, we chose GloVe (Pennington et al., 2014), which is pretrained on 840B tokens of web data from Common Crawl, and provides 300d vectors as word embeddings. As our sec …","url":["https://www.researchgate.net/profile/Hessam_Amini/publication/335704122_Neural_Feature_Extraction_for_Contextual_Emotion_Detection/links/5d76d6764585151ee4ab0908/Neural-Feature-Extraction-for-Contextual-Emotion-Detection.pdf"]} |
| {"year":"2019","title":"Neural Grammatical Error Correction by Simulating the Human Learner and the Human Proofreader","authors":["F Gaim, JW Chung, JC Park - 한국정보과학회 학술발표논문집, 2018"],"snippet":"… For this and the contrastive learning, we use a large 5-gram language model trained on the Common Crawl data [8]. Training and Decoding: To effectively handle out-of- vocabulary words, we use sub-word level tokenization and …","url":["http://www.dbpia.co.kr/Journal/ArticleDetail/NODE07613671"]} |
| {"year":"2019","title":"Neural Machine Translation for English–Kazakh with Morphological Segmentation and Synthetic Data","authors":["A Toral, L Edman, G Yeshmagambetova, J Spenader - … 2: Shared Task Papers, Day 1 …, 2019"],"snippet":"… 7.5 0.19 0.16 Wikititles 117.0 0.23 0.19 Table 1: Preprocessed EN–KK parallel training data. Words (M) Corpus Sentences (k) EN RU Common crawl 871.8 20.82 19.97 News-comm … Corpus Threshold Pairs left (k) CommonCrawl 0.7323 568.50 News Comm …","url":["https://www.aclweb.org/anthology/W19-5343"]} |
| {"year":"2019","title":"Neural network learning engine","authors":["CM Ormerod - US Patent App. 16/286,566, 2019"],"snippet":"… skill and not to limit the invention to any one embodiment, commercial word embedding tools can include Google News word embedding, which has been trained on an extensive corpus of news items, and/or GloVe word …","url":["https://patentimages.storage.googleapis.com/94/fd/43/d4a3cbb7706fec/US20190266234A1.pdf"]} |
| {"year":"2019","title":"Neural network-based approaches for biomedical relation classification: A review","authors":["Y Zhang, H Lin, Z Yang, J Wang, Y Sun, B Xu, Z Zhao - Journal of Biomedical …, 2019"],"snippet":"… Word2vec, Google news, https://code.google.com/archive/p/word2vec. GloVe, Wikipedia, Gigaword, Common Crawl, Twitter, https://nlp.stanford. edu/projects/glove. fastText, Wikipedia, UMBC corpus, news corpus …","url":["https://www.sciencedirect.com/science/article/pii/S1532046419302138"]} |
| {"year":"2019","title":"Neural NLP models under low-supervision scenarios","authors":["Y Zhang - 2019"],"snippet":"Page 1. Copyright by Ye Zhang 2019 Page 2. The Dissertation Committee for Ye Zhang certifies that this is the approved version of the following dissertation: Neural NLP Models Under Low-supervision Scenarios Committee: Matthew A Lease, Supervisor …","url":["https://repositories.lib.utexas.edu/bitstream/handle/2152/75032/ZHANG-DISSERTATION-2019.pdf?sequence=1"]} |
| {"year":"2019","title":"Neural Text Style Transfer via Denoising and Reranking","authors":["J Lee, Z Xie, C Wang, M Drach, D Jurafsky, AY Ng - … of the Workshop on Methods for …, 2019"],"snippet":"… 3. Fluency The post-transfer sentence should remain grammatical and fluent. We use the average log probability of the sentence posttransfer with respect to a language model trained on CommonCrawl as our measure of fluency …","url":["https://www.aclweb.org/anthology/W19-2309"]} |
| {"year":"2019","title":"NLNDE: The Neither-Language-Nor-Domain-Experts' Way of Spanish Medical Document De-Identification","authors":["L Lange, H Adel, J Strötgen - 2019"],"snippet":"… S2 (FLAIR+fastText): In contrast to all other runs, the second run uses only domain-independent embeddings, ie, embeddings that have been trained on standard narrative and news data from Common Crawl and Wikipedia …","url":["http://ceur-ws.org/Vol-2421/MEDDOCAN_paper_5.pdf"]}
| {"year":"2019","title":"NLP@ UIOWA at SemEval-2019 Task 6: Classifying the Crass using Multi-windowed CNNs","authors":["J Rusert, P Srinivasan - Proceedings of the 13th International Workshop on …, 2019"],"snippet":"… Word embeddings for Non-Out of Vocabulary (OOV) words are obtained from Glove (Pennington et al., 2014) which has been trained on Twitter data3. Experiments were also conducted with Glove common crawl data, but no visible improvement was found …","url":["https://www.aclweb.org/anthology/S19-2125"]} |
| {"year":"2019","title":"Noisy Parallel Corpus Filtering through Projected Word Embeddings","authors":["M Kurfalı, R Östling - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… Larger monolingual corpora based on Wikipedia and common crawl data were also provided.2 To train our model, we use all the parallel data available for the English-Sinhala and EnglishNepali pairs (summarized …","url":["https://www.aclweb.org/anthology/W19-5438"]} |
| {"year":"2019","title":"NRC Parallel Corpus Filtering System for WMT 2019","authors":["G Bernier-Colborne, C Lo - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… embedding models. Common Crawl data was not used to train the bilingual word embeddings. 2.2 … representation layer. We used XLM to train a model using almost all the available data, except for the monolingual English Common Crawl data. This …","url":["https://www.aclweb.org/anthology/W19-5434"]} |
| {"year":"2019","title":"Observing Dialogue in Therapy: Categorizing and Forecasting Behavioral Codes","authors":["J Cao, M Tanana, ZE Imel, E Poitras, DC Atkins…"],"snippet":"Page 1. Observing Dialogue in Therapy: Categorizing and Forecasting Behavioral Codes Jie Cao†, Michael Tanana‡, Zac E. Imel‡, Eric Poitras‡, David C. Atkins♦, Vivek Srikumar† †School of Computing, University of Utah …","url":["https://svivek.com/research/publications/cao2019observing.pdf"]} |
| {"year":"2019","title":"Observing LOD Using Equivalent Set Graphs: It Is Mostly Flat and Sparsely Linked","authors":["L Asprino, W Beek, P Ciancarini, F van Harmelen… - International Semantic Web …, 2019"],"snippet":"… The two largest available crawls of LOD available today are WebDataCommons and LOD-a-lot. WebDataCommons 2 [12] consists of \\(\\sim \\)31B triples that have been extracted from the CommonCrawl datasets (November 2018 version) …","url":["https://link.springer.com/chapter/10.1007/978-3-030-30793-6_4"]} |
| {"year":"2019","title":"Observing the LOD Cloud using Equivalent Set Graphs: the LOD Cloud is mostly flat and sparsely linked","authors":["L Asprino, W Beek, P Ciancarini, F van Harmelen…"],"snippet":"… The two largest available crawls of LOD available today are WebDataCommons and LOD-a-lot. WebDataCommons5 [12] consists of ∼31B triples that have been extracted from the CommonCrawl datasets (November 2018 version) …","url":["https://www.cs.vu.nl/~frankh/postscript/ISWC2019-LODanalytics.pdf"]} |
| {"year":"2019","title":"OECD Analytical Database on Individual Multinationals and their Affiliates (ADIMA)","authors":["G Pilgrim, N Ahmad, D Doyle - 2019"],"snippet":"… Secondly, information from MNE webpages is used from an open source 'copy of the internet' generated via web crawling from the Common Crawl 4 . This process develops a graph of the links between companies, from …","url":["https://www.gtap.agecon.purdue.edu/resources/download/9310.docx"]} |
| {"year":"2019","title":"Offensive Language and Hate Speech Detection for Danish","authors":["GI Sigurbergsson, L Derczynski - arXiv preprint arXiv:1908.04531, 2019"],"snippet":"… sample of text. Pre-trained Embeddings. The pre-trained FastText [24] embeddings are trained on data from the Common Crawl project and Wikipedia, in 157 languages (including English and Danish). FastText also provides …","url":["https://arxiv.org/pdf/1908.04531"]} |
| {"year":"2019","title":"On extracting data from tables that are encoded using HTML","authors":["JC Roldán, P Jiménez, R Corchuelo - Knowledge-Based Systems, 2019"],"snippet":"Skip to main content Skip to article …","url":["https://www.sciencedirect.com/science/article/pii/S095070511930509X"]} |
| {"year":"2019","title":"On Implementing the Binary Interpolative Coding Algorithm","authors":["GE PIBIRI - 2019"],"snippet":"… Table 4. Decoding time measured in average nanoseconds spent per decoded integer, for the run-aware implementation (ra) and for the not run-aware implementation. • CCNews is an English subset of the freely available news from CommonCrawl 3, consisting of …","url":["http://pages.di.unipi.it/pibiri/papers/BIC.pdf"]} |
| {"year":"2019","title":"On Measuring and Mitigating Biased Inferences of Word Embeddings","authors":["S Dev, T Li, J Phillips, V Srikumar - arXiv preprint arXiv:1908.09369, 2019"],"snippet":"Page 1. arXiv:1908.09369v1 [cs.CL] 25 Aug 2019 On Measuring and Mitigating Biased Inferences of Word Embeddings Sunipa Dev, Tao Li, Jeff Phillips, Vivek Srikumar School of Computing University of Utah Abstract Word …","url":["https://arxiv.org/pdf/1908.09369"]} |
| {"year":"2019","title":"On Measuring Social Biases in Sentence Encoders","authors":["C May, A Wang, S Bordia, SR Bowman, R Rudinger - arXiv preprint arXiv:1903.10561, 2019"],"snippet":"Page 1. On Measuring Social Biases in Sentence Encoders Chandler May1 Alex Wang2 Shikha Bordia2 Samuel R. Bowman2 Rachel Rudinger1 1Johns Hopkins University 2New York University {cjmay,rudinger}@jhu.edu {alexwang,sb6416,bowman}@nyu.edu Abstract …","url":["https://arxiv.org/pdf/1903.10561"]} |
| {"year":"2019","title":"On Optimally Partitioning Variable-Byte Codes","authors":["GE Pibiri, R Venturini - IEEE Transactions on Knowledge and Data …, 2019"],"snippet":"Page 1. 1041-4347 (c) 2018 IEEE. Personal use is permitted, but republication/ redistribution requires IEEE permission. See http://www.ieee.org/ publications_standards/publications/rights/index.html for more information. This …","url":["https://ieeexplore.ieee.org/abstract/document/8691421/"]} |
| {"year":"2019","title":"On relevance of enriching word embeddings in solving Natural Language Inference problem","authors":["T Wesołowski"],"snippet":"Page 1. Jagiellonian University Faculty of Mathematics and Computer Science Theoretical Computer Science Stationary Studies Index number: 1079621 Tomasz Wesołowski On relevance of enriching word embeddings in solving Natural Language Inference problem …","url":["http://algo.edu.pl/OnRelevanceOfWordEmbeddings.pdf"]} |
| {"year":"2019","title":"On Slicing Sorted Integer Sequences","authors":["GE Pibiri - arXiv preprint arXiv:1907.01032, 2019"],"snippet":"… 2009. • CCNews is a dataset of news freely available from CommonCrawl: http://commoncrawl.org/ 2016/10/news-dataset-available. Precisely, the datasets consists of the news appeared from 09/01/16 to 30/03/18. Identifiers …","url":["https://arxiv.org/pdf/1907.01032"]} |
| {"year":"2019","title":"On the Effect of Low-Frequency Terms on Neural-IR Models","authors":["S Hofstätter, N Rekabsaz, C Eickhoff, A Hanbury - arXiv preprint arXiv:1904.12683, 2019"],"snippet":"… collection. The details of the resulting 1Provided in the form of evaluation tuples: top1000.dev.tsv 242B lower-cased (CommonCrawl) from: https://nlp.stanford.edu/ projects/glove/ Table 1: Left: Details of the vocabularies. Right …","url":["https://arxiv.org/pdf/1904.12683"]} |
| {"year":"2019","title":"On the Robustness of Unsupervised and Semi-supervised Cross-lingual Word Embedding Learning","authors":["Y Doval, J Camacho-Collados, L Espinosa-Anke… - arXiv preprint arXiv …, 2019"],"snippet":"… google.com/site/rmyeid/projects/polyglot 2The sources of the web-corpora are: UMBC (Han et al., 2013), 1-billion (Cardellino, 2016), itWaC and sdeWaC (Ba- roni et al., 2009), Hamshahri (AleAhmad et al., 2009), and Common Crawl downloaded from http://www …","url":["https://arxiv.org/pdf/1908.07742"]} |
| {"year":"2019","title":"On Using Machine Learning to Identify Knowledge in API Reference Documentation","authors":["D Fucci, A Mollaalizadehbahnemiri, W Maalej - arXiv preprint arXiv:1907.09807, 2019"],"snippet":"… For the deep learning classifiers in our benchmark, we train GloVe [19] embeddings based on four large corpora, summarized in Table 3. The Common Crawl (CC) is a pre-trained embedding downloaded in …","url":["https://arxiv.org/pdf/1907.09807"]} |
| {"year":"2019","title":"On Using SpecAugment for End-to-End Speech Translation","authors":["P Bahar, A Zeyer, R Schlüter, H Ney"],"snippet":"… For MT training, we use the TED, and the OpenSubtitles2018 corpora, as well as the data provided by the WMT 2018 evaluation (Europarl, ParaCrawl, CommonCrawl, News Commentary, and Rapid), a total of 65M lines of parallel sentences …","url":["https://www-i6.informatik.rwth-aachen.de/publications/download/1122/Bahar-IWSLT-2019.pdf"]} |
| {"year":"2019","title":"One Epoch Is All You Need","authors":["A Komatsuzaki - arXiv preprint arXiv:1906.06669, 2019"],"snippet":"… Trinh & Le (2018) pointed out that CommonCrawl contains a large portion of corrupt samples, which makes it unsuitable for the training. The proportion of the corrupt samples in CommonCrawl is substantially higher than 50 …","url":["https://arxiv.org/pdf/1906.06669"]} |
| {"year":"2019","title":"Online Parallel Data Extraction with Neural Machine Translation","authors":["D Ruiter - 2019"],"snippet":"Page 1. Universität des Saarlandes Master's Thesis Online Parallel Data Extraction with Neural Machine Translation submitted in fulfillment of the degree requirements of the MSc in Language Science and Technology at Saarland University …","url":["https://www.clubs-project.eu/assets/publications/other/MSc_Thesis_Ruiter.pdf"]} |
| {"year":"2019","title":"Ontological Traceability using Natural Language Processing","authors":["R Benitez - 2019"],"snippet":"Page 1. Ontological Traceability using Natural Language Processing A master thesis presented by Edder de la Rosa Benitez Submitted to the Department of Organization and Information in partial fulfillment of the …","url":["https://dspace.library.uu.nl/bitstream/handle/1874/383214/Master_Thesis_E_De_la_Rosa.pdf?sequence=2"]} |
| {"year":"2019","title":"OpenCeres: When Open Information Extraction Meets the Semi-Structured Web","authors":["C Lockard, P Shiralkar, XL Dong"],"snippet":"… 5.1 Experimental Setup Datasets: Our primary dataset is the augmented SWDE corpus described in Section 4. In addition, we used the set of 315 movie websites (comprising 433,000 webpages) found in Common …","url":["http://lunadong.com/publication/openCeres_naacl.pdf"]} |
| {"year":"2019","title":"OPTIMIZE THE LEARNING RATE OF NEURAL ARCHITECTURE IN MYANMAR STEMMER","authors":["Y Oo, KM Soe"],"snippet":"… Word vector pre-trained on large text corpora have been released on [10] \" that trained on 3 billion words from Wikipedia and Common Crawl using Continuous bag-of-words (CBOW) 300-dimension …","url":["https://www.academia.edu/download/61248451/120191117-8847-1ko3nhm.pdf"]} |
| {"year":"2019","title":"Optimizer Comparison with Dropout for Neural Sequence Labeling in Myanmar Stemmer","authors":["O Yadanar, KM Soe - 2019 IEEE International Conference on Industry 4.0 …, 2019"],"snippet":"… Parameter initialization: It has used Learning Word Vectors for 157 Languages that trained on 3 billion words from Wikipedia and Common Crawl using CBOW 300-dimension (E. Grave, P. Bojanowski, P. Gupta, A. Joulin, T. Mikolov,2018) for both word and character …","url":["https://ieeexplore.ieee.org/abstract/document/8784850/"]} |
| {"year":"2019","title":"Optimizing Social Media Data Using Genetic Algorithm","authors":["S Das, AK Kolya, D Das - Metaheuristic Approaches to Portfolio Optimization, 2019"],"snippet":"Page 1. 126 Copyright © 2019, IGI Global. Copying or distributing in print or electronic forms without written permission of IGI Global is prohibited. Chapter 6 DOI: 10.4018/978-1-5225-8103-1.ch006 ABSTRACT Twitter-based …","url":["https://www.igi-global.com/chapter/optimizing-social-media-data-using-genetic-algorithm/233176"]} |
| {"year":"2019","title":"Overview of the CLEF eHealth Evaluation Lab 2019","authors":["E Kanoulas, D Li, L Azzopardi, R Spijker, G Zuccon… - Experimental IR Meets …"],"snippet":"… More specifically, for the Abstract and Title Screening subtask the PubMed Document Identifiers (PMIDs) of potentially relevant 4http://commoncrawl. org/(last accessed on 28 May 2019) … It consists of web pages acquired from the CommonCrawl …","url":["http://books.google.de/books?hl=en&lr=lang_en&id=LqGsDwAAQBAJ&oi=fnd&pg=PA322&dq=commoncrawl&ots=8duC39Wv1R&sig=Tt9HYmgrR17eWjcTJPZvsij9B5g"]} |
| {"year":"2019","title":"P-SIF: Document Embeddings Using Partition Averaging","authors":["V Gupta, A Saw, P Nokhiz, P Netrapalli, P Rai…"],"snippet":"… Page 5. evaluation. We use the PARAGRAM-SL999 (PSL) as word embeddings, obtained by training on the PPDB dataset. 7 We use the fixed weighting parameter a value of 10−3, and the word frequencies p(w) are estimated from the commoncrawl dataset …","url":["https://vgupta123.github.io/docs/AAAI-GuptaV.3656.pdf"]} |
| {"year":"2019","title":"P2L: Predicting Transfer Learning for Images and Semantic Relations","authors":["B Bhattacharjee, N Codella, JR Kender, S Huo… - arXiv preprint arXiv …, 2019"],"snippet":"… We use the CC-DBP [12] dataset: the text of Common Crawl1 and the semantic relations schema and training data from DBpedia [1]. DBpedia is a knowledge graph extracted from the infoboxes from Wikipedia … 4.3.2 Validation on Common Crawl - DBpedia …","url":["https://arxiv.org/pdf/1908.07630"]} |
| {"year":"2019","title":"PaDAWaNS","authors":["TLM Brands"],"snippet":"Page 1. PaDAWaNS Proactive Domain Abuse Warning and Notification System by TLM Brands to obtain the degree of Master of Science at the Delft University of Technology, to be defended publicly on Tuesday January 15, 2019 at 11:00 AM …","url":["https://www.sidnlabs.nl/downloads/theses/thesis_brands_padawans.pdf"]} |
| {"year":"2019","title":"Parallel External Memory Wavelet Tree and Wavelet Matrix Construction","authors":["J Ellert, F Kurpicz - International Symposium on String Processing and …, 2019"],"snippet":"… CC \\((\\sigma =242)\\) contains websites (without HTML tags) that have been crawled by the Common Crawl corpus (http://commoncrawl.org), and. Wiki \\((\\sigma =213)\\) are recent Wikipedia dumps containing XML files that …","url":["https://link.springer.com/chapter/10.1007/978-3-030-32686-9_28"]} |
| {"year":"2019","title":"Paraphrase-Sense-Tagged Sentences","authors":["A Cocos, C Callison-Burch, S Chen, D Khashabi… - Transactions, 2019"],"snippet":"Skip to main content …","url":["http://callison-burch.github.io/publications.html"]} |
| {"year":"2019","title":"PDRCNN: Precise Phishing Detection with Recurrent Convolutional Neural Networks","authors":["W Wang, F Zhang, X Luo, S Zhang - Security and Communication Networks, 2019"],"snippet":"… This method first encodes the URL string using the one-hot encoding method, and then inputs each encoded character vector into the LSTM neurons for training and testing. The method achieved an accuracy of 0.935 on the …","url":["http://downloads.hindawi.com/journals/scn/2019/2595794.pdf"]} |
| {"year":"2019","title":"Peer Review and the Production of Scholarly Knowledge: Automated Textual Analysis of Manuscripts Revised for Publication in Administrative Science Quarterly","authors":["D Strang, F Dokshin - The Production of Managerial Knowledge and …, 2019"],"snippet":"… numbers, and filter out “stop words.” Stop words are the most common words in the English language (eg, “the,” “not,” “a”). 2 Next, for each word in the pre-processed sentences, we generate word vectors from a GloVe model …","url":["https://www.emeraldinsight.com/doi/abs/10.1108/S0733-558X20190000059006"]} |
| {"year":"2019","title":"PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization","authors":["J Zhang, Y Zhao, M Saleh, PJ Liu - arXiv preprint arXiv:1912.08777, 2019"],"snippet":"… T5 (Raffel et al., 2019) generalized the text-to- text framework to a variety of NLP tasks and showed the advantage of scaling up model size (to 11 billion parameters) and pre-training corpus, introducing C4, a massive text corpus …","url":["https://arxiv.org/pdf/1912.08777"]} |
| {"year":"2019","title":"People represent mental states in terms of rationality, social impact, and valence: Validating the 3d Mind Model","authors":["MA Thornton, D Tamir"],"snippet":"Page 1. Running head: MENTAL STATE DIMENSIONS 1 People represent mental states in terms of rationality, social impact, and valence: Validating the 3d Mind Model Mark A. Thornton* and Diana I. Tamir Department of …","url":["https://psyarxiv.com/akhpq/download?format=pdf"]} |
| {"year":"2019","title":"PhishFry–A Proactive Approach to Classify Phishing Sites using SCIKIT Learn","authors":["D Brites, M Wei"],"snippet":"… [Online]. Available: http://5000best.com/websites/. [Accessed 2019]. [26] OpenPhish, \" 2019. [Online]. Available: https://openphish.com/. [27] Amazon Web Services, \" Amazon, 2019. [Online] …","url":["https://www.shsu.edu/mxw032/publication/19gc-bw.pdf"]} |
| {"year":"2019","title":"Phishing Detection Based on Machine Learning and Feature Selection Methods","authors":["M Almseidin, AMA Zuraiq, M Al-kasassbeh, N Alnidami - International Journal of …, 2019"],"snippet":"… Phishing webpages are collected from Phish-Tank and Open-Phish, while legitimate web-pages are collected from Alexa and Common Crawl. These web-pages are downloaded on two distinct sessions, from January to May 2015 and through May to June 2017 …","url":["https://onlinejour.journals.publicknowledgeproject.org/index.php/i-jim/article/download/11411/6259"]} |
| {"year":"2019","title":"Phishing URL Detection Via Capsule-Based Neural Network","authors":["Y Huang, J Qin, W Wen - 2019 IEEE 13th International Conference on Anti …, 2019"],"snippet":"… [27] VirusTotal, https://www.virustotal.com/ [28] Common Crawl, https://commoncrawl.org/ [29] J. Ma, LK Saul, S. Savage, and GM VoelNer, “Beyond blacNlists: learning to detect malicious web sites from suspicious …","url":["https://ieeexplore.ieee.org/abstract/document/8925000/"]} |
| {"year":"2019","title":"Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages","authors":["Y Kim, P Petrov, P Petrushkov, S Khadivi, H Ney - arXiv preprint arXiv:1909.09524, 2019"],"snippet":"Page 1. Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages Yunsu Kim1∗ Petre Petrov1,2∗ Pavel Petrushkov2 Shahram Khadivi2 Hermann Ney1 1RWTH Aachen University, Aachen …","url":["https://arxiv.org/pdf/1909.09524"]} |
| {"year":"2019","title":"PKUSE at SemEval-2019 Task 3: Emotion Detection with Emotion-Oriented Neural Attention Network","authors":["L Ma, L Zhang, W Ye, W Hu - Proceedings of the 13th International Workshop on …, 2019"],"snippet":"… Table 1: Datasets for Semeval-2019 Task 3. 4.2 Experiments The model is implemented using Keras 2.0 (Chollet et al., 2017). We experiment with Stanford's GloVe 300 dimensional word embeddings trained on 840 billion words from Common Crawl …","url":["https://www.aclweb.org/anthology/S19-2049"]}
| {"year":"2019","title":"PLAGO: A SYSTEM FOR PLAGIARISM DETECTION AND INTERVENTION IN MASSIVE COURSES","authors":["CT Guida - 2019"],"snippet":"… Web Crawl: Used for queuing and monitoring of importing web pages from the CommonCrawl.org public dataset (described in 3.5.2). • Admin Options … pages. Common Crawl is a non-profit organization which offers a public …","url":["https://smartech.gatech.edu/bitstream/handle/1853/61787/GUIDA-THESIS-2019.pdf?sequence=1&isAllowed=y"]} |
| {"year":"2019","title":"Poetry: Identification, Entity Recognition, and Retrieval","authors":["IV Foley, J John - 2019"],"snippet":"Page 1. University of Massachusetts Amherst ScholarWorks@UMass Amherst Doctoral Dissertations Dissertations and Theses 2019 Poetry: Identification, Entity Recognition, and Retrieval John J. Foley IV Follow this and additional …","url":["https://scholarworks.umass.edu/cgi/viewcontent.cgi?article=2628&context=dissertations_2"]} |
| {"year":"2019","title":"Populating Web Scale Knowledge Graphs using Distantly Supervised Relation Extraction and Validation","authors":["A Gliozzo, MR Glass, S Dash, M Canim - arXiv preprint arXiv:1908.08104, 2019"],"snippet":"… Also, a web-scale experiment conducted to extend DBPedia with knowledge from Common Crawl shows that our system is not only scalable but also does not require any adaptation cost, while yielding substantial accuracy gain. 1 Introduction …","url":["https://arxiv.org/pdf/1908.08104"]} |
| {"year":"2019","title":"Precise Detection of Content Reuse in the Web","authors":["C Ardi, J Heidemann - ACM SIGCOMM Computer Communication Review, 2019"],"snippet":"… We verify our algorithm and its choices with controlled experiments over three web datasets: Common Crawl (2009/10), GeoCities (1990s–2000s), and a phishing corpus (2014) … In the Common Crawl dataset of 40.5×109 chunks, we set the threshold to 105 …","url":["https://dl.acm.org/citation.cfm?id=3336940"]} |
| {"year":"2019","title":"Predicting ConceptNet Path Quality Using Crowdsourced Assessments of Naturalness","authors":["Y Zhou, S Schockaert, JA Shah - arXiv preprint arXiv:1902.07831, 2019"],"snippet":"… The number in parenthesis after each feature name indicates the dimension of that feature. Vertex embedding (300) This feature is taken directly from the 300dimensional GloVe (25) embedding, pre-trained on the Common Crawl2 dataset with 840 billion tokens …","url":["https://arxiv.org/pdf/1902.07831"]} |
| {"year":"2019","title":"Predicting Word Concreteness and Imagery","authors":["J Charbonnier, C Wartena - Proceedings of the 13th International Conference on …, 2019"],"snippet":"… The other two version (also available with and without subword information) with 2 million word vectors trained on the Common Crawl with 600B tokens. In our experiments we used the version trained on Common Crawl without …","url":["https://www.aclweb.org/anthology/W19-0415"]} |
| {"year":"2019","title":"Probing Contextualized Sentence Representations with Visual Awareness","authors":["Z Zhang, R Wang, K Chen, M Utiyama, E Sumita… - arXiv preprint arXiv …, 2019"],"snippet":"… We used newsdev2016 as the dev set and newstest2016 as the test set. 2) For the EN-DE translation task, 4.43M bilingual sentence pairs of the WMT14 dataset were used as training data, including Common Crawl, News Commentary, and Europarl v7 …","url":["https://arxiv.org/pdf/1911.02971"]} |
| {"year":"2019","title":"Product Classification Using Microdata Annotations","authors":["Z Zhang, M Paramita - International Semantic Web Conference, 2019"],"snippet":"… dimension of the continuous vector representation of each word. In this work, we use the GloVe word embedding vectors pre-trained on the Common Crawl corpus 3 with 300 dimensions. Since we are dealing with content from e …","url":["https://link.springer.com/chapter/10.1007/978-3-030-30793-6_41"]} |
| {"year":"2019","title":"Provision and Usage of Provenance Data in the WebIsALOD Knowledge Graph","authors":["S Hertling, H Paulheim - CEUR Workshop Proceedings, 2018"],"snippet":"… As described in [6], the Copyright c 2018 for this paper by its authors. Copying permitted for private and academic purposes. 1 https://commoncrawl.org 2 NP stands for noun phrase. 3 https://www.w3.org/TR/skos-reference/ Page 2. isa:concept/_Gmail …","url":["http://ceur-ws.org/Vol-2317/article-06.pdf"]} |
| {"year":"2019","title":"PT-CoDE: Pre-trained Context-Dependent Encoder for Utterance-level Emotion Recognition","authors":["W Jiao, MR Lyu, I King - arXiv preprint arXiv:1910.08916, 2019"],"snippet":"… Here, we utilize the 300-dimensional pre-trained GloVe word vectors1 (Pennington et al., 2014) trained over 840B Common Crawl to initialize the word embedding layer. Those words that cannot be found in the GloVe …","url":["https://arxiv.org/pdf/1910.08916"]} |
| {"year":"2019","title":"QE BERT: Bilingual BERT using Multi-task Learning for Neural Quality Estimation","authors":["H Kim, JH Lim, HK Kim, SH Na - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… We used parallel data provided for the WMT19 news machine translation task6 to pre-train QE BERT. The English-Russian parallel data set consisted of the ParaCrawl corpus, Common Crawl corpus, News Commentary corpus, and Yandex …","url":["https://www.aclweb.org/anthology/W19-5407"]} |
| {"year":"2019","title":"QED: A Fact Verification and Evidence Support System","authors":["J Luken - 2019"],"snippet":"… embedding layers, as described below. 4.3.2 Embedding We use GloVe word embeddings (Pennington et al., 2014) with 300 dimensions pretrained using CommonCrawl to get a vector representation of the evidence sentence. We","url":["https://etd.ohiolink.edu/!etd.send_file?accession=osu1555074124008897&disposition=inline"]} |
| {"year":"2019","title":"Quantifying the Semantic Core of Gender Systems","authors":["DBPH Wallach"],"snippet":"… 4The FASTTEXT word embeddings were trained using Common Crawl and Wikipedia data, using CBOW with po- sition weights, with character n-grams of length 5. For more information, see http://fasttext.cc/docs/en …","url":["https://openreview.net/pdf?id=ByxcApoPwS"]} |
| {"year":"2019","title":"QuAVONet: Answering Questions on the SQuAD Dataset with QANet and Answer Verifier","authors":["J Cervantes"],"snippet":"… 5.2 Implementation Details For the word embeddings, I used the starter code's 300-dimensional GloVE vectors trained on the CommonCrawl dataset [6]. These embeddings remained unchanged and were not trained for any of my models …","url":["https://pdfs.semanticscholar.org/f71e/5c6cdd9e06068625eb82b3d9647823e80503.pdf"]} |
| {"year":"2019","title":"Quick and (maybe not so) Easy Detection of Anorexia in Social Media Posts","authors":["E Mohammadi, H Amini, L Kosseim - 2019"],"snippet":"… As shown in Figure 1, these token vectors are then fed to the hidden layer. Two different pretrained word embeddings were experimented with. The first word embedder was the 300d version of GloVe [26] that was pretrained …","url":["https://www.researchgate.net/profile/Hessam_Amini/publication/334848955_Quick_and_maybe_not_so_Easy_Detection_of_Anorexia_in_Social_Media_Posts/links/5d434b9992851cd04699c9ce/Quick-and-maybe-not-so-Easy-Detection-of-Anorexia-in-Social-Media-Posts.pdf"]} |
| {"year":"2019","title":"Quotient Hash Tables-Efficiently Detecting Duplicates in Streaming Data","authors":["R Géraud, M Lombard-Platet, D Naccache - arXiv preprint arXiv:1901.04358, 2019"],"snippet":"Page 1. arXiv:1901.04358v1 [cs.DS] 14 Jan 2019 Quotient Hash Tables - Efficiently Detecting Duplicates in Streaming Data Rémi Gérauda,c, Marius Lombard-Platet∗ a,b, and David Naccachea,c aDépartement d'informatique …","url":["https://arxiv.org/pdf/1901.04358"]}
| {"year":"2019","title":"Racial bias in legal language","authors":["D Rice, JH Rhodes, T Nteta - Research & Politics, 2019"],"snippet":"Although racial bias in the law is widely recognized, it remains unclear how these biases are in entrenched in the language of the law, judicial opinions. In th...","url":["https://journals.sagepub.com/doi/pdf/10.1177/2053168019848930"]} |
| {"year":"2019","title":"Random Projection in Deep Neural Networks","authors":["PI Wójcik - arXiv preprint arXiv:1812.09489, 2018"],"snippet":"Page 1. Akademia Górniczo-Hutnicza im. Stanisława Staszica w Krakowie Wydział Informatyki, Elektroniki i Telekomunikacji Katedra Informatyki Rozprawa doktorska Zastosowania metody rzutu przypadkowego w głębokich …","url":["https://arxiv.org/pdf/1812.09489"]} |
| {"year":"2019","title":"Real or Fake? Learning to Discriminate Machine from Human Generated Text","authors":["A Bakhtin, S Gross, M Ott, Y Deng, MA Ranzato… - arXiv preprint arXiv …, 2019"],"snippet":"… CCNews: We collect a de-duplicated subset of the English portion of the CommonCrawl news dataset [Nagel, 2016], which totals around 16 Billion words … Sebastian Nagel. Cc-news. http://web.archive.org/save/http …","url":["https://arxiv.org/pdf/1906.03351"]} |
| {"year":"2019","title":"Real-time Claim Detection from News Articles and Retrieval of Semantically-Similar Factchecks","authors":["B Adler, G Boscaini-Gilroy - arXiv preprint arXiv:1907.02030, 2019"],"snippet":"… new problem. Many unsupervised text embeddings are trained on the CommonCrawl 1 dataset of approx. 840 billion tokens. This … dataset. Supervised datasets are 1CommonCrawl found at http://commoncrawl.org/ unlikely ever …","url":["https://arxiv.org/pdf/1907.02030"]} |
| {"year":"2019","title":"Real-time event detection using recurrent neural network in social sensors","authors":["VQ Nguyen, TN Anh, HJ Yang - International Journal of Distributed Sensor Networks, 2019"],"snippet":"We proposed an approach for temporal event detection using deep learning and multi-embedding on a set of text data from social media. First, a convolutional neural network augmented with multiple w...","url":["https://journals.sagepub.com/doi/pdf/10.1177/1550147719856492"]} |
| {"year":"2019","title":"Real-world Conversational AI for Hotel Bookings","authors":["B Li, N Jiang, J Sham, H Shi, H Fazal - arXiv preprint arXiv:1908.10001, 2019"],"snippet":"… We compare the following models: 1) Averaged GloVe + feedforward: We use 100dimensional, trainable GloVe embeddings [17] trained on Common Crawl, and produce sentence embeddings for each of the two inputs by averaging across all tokens …","url":["https://arxiv.org/pdf/1908.10001"]} |
| {"year":"2019","title":"Recommendation System with Aspect-Based Sentiment Analysis","authors":["Q Du, D Zhu, W Duan"],"snippet":"… The word vectors model we use is the \"en_core_web_lg\" model in spaCy. The model contains English multi-task CNN trained on OntoNotes 5[3], with GloVe[8] vectors trained on Common Crawl. It provides 300dimensional …","url":["http://rafaelsilva.com/wp-content/uploads/2018/12/014-Aspect-based-sentiment-analysis.pdf"]} |
| {"year":"2019","title":"Refining Word Reesprentations by Manifold Learning","authors":["C Yonghe, H Lin, L Yang, Y Diao, S Zhang, F Xiaochao"],"snippet":"… judgment. This is exemplified by the WS353[Finkelstein et al., 2001]word similarity ground truth in Figure 1. Based on the Common Crawl corpus (42B), the Glove model is used to train 300-dimensional word vectors. The similarity …","url":["https://www.ijcai.org/proceedings/2019/0749.pdf"]} |
| {"year":"2019","title":"Regressing Word and Sentence Embeddings for Regularization of Neural Machine Translation","authors":["IJ Unanue, EZ Borzeshi, M Piccardi - arXiv preprint arXiv:1909.13466, 2019"],"snippet":"… De-En: The German-English dataset (de-en) has been taken from the WMT18 news translation shared task1. The training set contains over 5M sentence pairs collected from the Europarl, CommonCrawl and Newscommentary parallel corpora …","url":["https://arxiv.org/pdf/1909.13466"]} |
| {"year":"2019","title":"Rel4KC: A Reinforcement Learning Agent for Knowledge Graph Completion and Validation","authors":["X Lin, P Subasic, H Yin - 2019"],"snippet":"… The fact triples extracted from free text (Common Crawl) are then fed to the trained RL agent to determine their trustworthiness. If passing the validation, a triple is entered into target KG … The free text used in this study is Common Crawl corpus …","url":["http://www.cse.msu.edu/~zhaoxi35/DRL4KDD/1.pdf"]} |
| {"year":"2019","title":"Repositioning privacy concerns: Web servers controlling URL metadata","authors":["R Ferreira, RL Aguiar - Journal of Information Security and Applications, 2019"],"snippet":"… on empirical observation of web browsers and HTTP server implementations, and while some implementations allow longer URLs (eg, 100.000 octets) this value remains a reasonable assumption for practical purposes 1 . Our …","url":["https://www.sciencedirect.com/science/article/pii/S2214212618302588"]} |
| {"year":"2019","title":"Representation Learning for Question Classification via Topic Sparse Autoencoder and Entity Embedding","authors":["D Li, J Zhang, P Li - IEEE Big Data, 2018"],"snippet":"… WordNet 2. The embeddings of entity-related information are also trained with skip-gram. The word embeddings are initialized with the 300 dimensional pretrained vectors 3 from the Common Crawl of 840 billion tokens and 2.2 …","url":["http://research.baidu.com/Public/uploads/5c1c9ab3069f4.pdf"]} |
| {"year":"2019","title":"Representing Overlaps in Sequence Labeling Tasks with a Novel Tagging Scheme: bigappy-unicrossy","authors":["G Berk, B Erden, T Güngör"],"snippet":"… a language-independent system based on the bidirectional LSTM-CRF model provided by [7]. Similar to Deep-BGT system [2], we make use of the pretrained word embeddings provided by fastText [6]. The word embeddings …","url":["https://www.cmpe.boun.edu.tr/~gungort/papers/Representing%20Overlaps%20in%20Sequence%20Labeling%20Tasks%20with%20a%20Novel%20Tagging%20Scheme%20-%20bigappy-unicrossy.pdf"]} |
| {"year":"2019","title":"Review and Visualization of Facebook's FastText Pretrained Word Vector Model","authors":["JC Young, A Rusli - … International Conference on Engineering, Science, and …, 2019"],"snippet":"… Machine Learning (ML). Currently, FastText provides pretrained Word2Vec model for 157 language that trained on Common Crawl and Wikipedia (Bahasa Indonesia is one from the provided model) [15]. In its Word2Vec model …","url":["https://ieeexplore.ieee.org/abstract/document/8863015/"]} |
| {"year":"2019","title":"RIPPED: Recursive Intent Propagation using Pretrained Embedding Distances","authors":["M Ball - 2019"],"snippet":"… GloVe (Pennington et al., 2014) is a word embedding model trained on data from the Common Crawl corpus6. GloVe is a log-bilinear regression model that incorporates both local context windows and global matrix …","url":["https://cs.brown.edu/research/pubs/theses/ugrad/2019/ball.michael.pdf"]} |
| {"year":"2019","title":"RNN Embeddings for Identifying Difficult to Understand Medical Words","authors":["H Pylieva, A Chernodub, N Grabar, T Hamon - … of the 18th BioNLP Workshop and …, 2019"],"snippet":"… improve classification accuracy for our specific problem. We note that FastText word embeddings trained on Wikipedia and Common Crawl5 texts have an important part of words from our dataset. According to our analysis, the …","url":["https://www.aclweb.org/anthology/W19-5011"]} |
| {"year":"2019","title":"RoBERTa: A Robustly Optimized BERT Pretraining Approach","authors":["Y Liu, M Ott, N Goyal, J Du, M Joshi, D Chen, O Levy… - arXiv preprint arXiv …, 2019"],"snippet":"… (16GB). • CC-NEWS, which we collected from the En- glish portion of the CommonCrawl News dataset (Nagel, 2016) … STORIES, a dataset introduced in Trinh and Le (2018) containing a subset of CommonCrawl data filtered …","url":["https://arxiv.org/pdf/1907.11692"]} |
| {"year":"2019","title":"Robust Argument Unit Recognition and Classification","authors":["D Trautmann, J Daxenberger, C Stab, H Schütze… - arXiv preprint arXiv …, 2019"],"snippet":"… 2http://commoncrawl.org/2016/02/ february-2016-crawl-archive-now-available/ 3https://www.elastic.co/products/ elasticsearch the topic. Each document was checked for its corresponding WARC file at the Common Crawl In …","url":["https://arxiv.org/pdf/1904.09688"]} |
| {"year":"2019","title":"Robust Named Entity Recognition with Truecasing Pretraining","authors":["S Mayhew, N Gupta, D Roth - arXiv preprint arXiv:1912.07095, 2019"],"snippet":"… and Kauchak (2011) and used in Susanto, Chieu, and Lu (2016), and a specially preprocessed large dataset from English Common Crawl (CC).1 … 1commoncrawl.org 2In a naming clash, the moses script is called …","url":["https://arxiv.org/pdf/1912.07095"]} |
| {"year":"2019","title":"SACABench: Benchmarking Suffix Array Construction","authors":["J Bahne, N Bertram, M Böcker, J Bode, J Fischer… - International Symposium on …, 2019"],"snippet":"… We removed every character but A, C, G, and T. CommonCrawl (\\(\\sigma =242,\\mathrm {avg\\_lcp}=3,995, \\mathrm {max\\_lcp}=605,632\\)), which is a crawl of the web done by the CommonCrawl Corpus (http://commoncrawl.org) without any HTML tags …","url":["https://link.springer.com/chapter/10.1007/978-3-030-32686-9_29"]} |
| {"year":"2019","title":"Samsung and University of Edinburgh's System for the IWSLT 2019","authors":["J Wetesko, M Chochowski, P Przybysz, P Williams… - 2019"],"snippet":"… CommonCrawl and NewsCrawl corpora we used the approach de- scribed in [5]. Two RNN language models were constructed using Marian toolkit: in-domain trained with MUST-C corpus and out-of-domain created using …","url":["https://www.zora.uzh.ch/id/eprint/176328/1/IWSLT2019_paper_34.pdf"]} |
| {"year":"2019","title":"Satellite System Graph: Towards the Efficiency Up-Boundary of Graph-Based Approximate Nearest Neighbor Search","authors":["C Fu, C Wang, D Cai - arXiv preprint arXiv:1907.06146, 2019"],"snippet":"Page 1. Satellite System Graph: Towards the Efficiency Up-Boundary of Graph-Based Approximate Nearest Neighbor Search Cong Fu, Changxu Wang, Deng Cai ∗ The State Key Lab of CAD&CG, College of Computer Science …","url":["https://arxiv.org/pdf/1907.06146"]} |
| {"year":"2019","title":"SberQuAD--Russian Reading Comprehension Dataset: Description and Analysis","authors":["P Efimov, L Boytsov, P Braslavski - arXiv preprint arXiv:1912.09723, 2019"],"snippet":"… We tokenized text using spaCy16. To initialize the embedding layer for BiDAF, DocQA, DrQA, and R-Net we use Russian case-sensitive fastText embeddings trained on Common Crawl and Wikipedia17. This initialization is used for both questions and paragraphs …","url":["https://arxiv.org/pdf/1912.09723"]} |
| {"year":"2019","title":"SC-UPB at the VarDial 2019 Evaluation Campaign: Moldavian vs. Romanian Cross-Dialect Topic Identification","authors":["C Onose, DC Cercel, S Trausan-Matu - Proceedings of the Sixth Workshop on NLP …, 2019"],"snippet":"… (2018), Nordic Language Processing Laboratory (NLPL) word embedding repository (Kutuzov et al., 2017) and Common Crawl (CC) word vectors (Grave et al., 2018). The relevant details for each word vector representation model can be viewed in Table 2 …","url":["https://www.aclweb.org/anthology/W19-1418"]} |
| {"year":"2019","title":"Scalable Cross-Lingual Transfer of Neural Sentence Embeddings","authors":["H Aldarmaki, M Diab - arXiv preprint arXiv:1904.05542, 2019"],"snippet":"… We used WMT'12 Common Crawl data for crosslingual alignment, and WMT'12 test sets for evaluations. We used the augmented SNLI data de- scribed in (Dasgupta et al., 2018) and their translations for training the mono-lingual and joint InferSent models …","url":["https://arxiv.org/pdf/1904.05542"]} |
| {"year":"2019","title":"SECNLP: A Survey of Embeddings in Clinical Natural Language Processing","authors":["K KS, S Sangeetha - arXiv preprint arXiv:1903.01039, 2019","KK Subramanyam, S Sivanesan - Journal of Biomedical Informatics, 2019"],"snippet":"Page 1. 1 SECNLP: A Survey of Embeddings in Clinical Natural Language Processing Kalyan KS, S. Sangeetha Text Analytics and Natural Language Processing Lab Department of Computer Applications National …","url":["https://arxiv.org/pdf/1903.01039","https://www.sciencedirect.com/science/article/pii/S1532046419302436"]} |
| {"year":"2019","title":"Security In Plain TXT","authors":["A Portier, H Carter, C Lever"],"snippet":"… These seed domains are compiled from a combination of sources, including the Alexa top 1 million, the TLD zone files for COM, NAME, NET, ORG, and BIZ, sites captured by the Common Crawl project, multiple public domain …","url":["http://www.henrycarter.org/papers/plaintxt19.pdf"]} |
| {"year":"2019","title":"Security Posture Based Incident Forecasting","authors":["D Mulugeta - 2019"],"snippet":"Page 1. Page 2. Page 3. Security Posture Based Incident Forecasting A Thesis Submitted to the Faculty of Drexel University by Dagmawi Mulugeta in partial fulfillment of the requirements for the degree of Master of Science June 2019 Page 4 …","url":["http://search.proquest.com/openview/a6f070655e6045b93b595adc3b0965ae/1?pq-origsite=gscholar&cbl=18750&diss=y"]} |
| {"year":"2019","title":"See-Through-Text Grouping for Referring Image Segmentation","authors":["DJ Chen, S Jia, YC Lo, HT Chen, TL Liu - … of the IEEE International Conference on …, 2019"],"snippet":"… The representation st is visual-attended and its goodness is linked to the predicted segmentation map Pt−1. The GloVe model in our implementation is pre-trained on Common Crawl in 840B tokens. Following …","url":["http://openaccess.thecvf.com/content_ICCV_2019/papers/Chen_See-Through-Text_Grouping_for_Referring_Image_Segmentation_ICCV_2019_paper.pdf"]} |
| {"year":"2019","title":"Semantic Characteristics of Schizophrenic Speech","authors":["K Bar, V Zilberstein, I Ziv, H Baram, N Dershowitz… - arXiv preprint arXiv …, 2019"],"snippet":"… Specifically, we used Hebrew pretrained vectors provided by fastText (Grave et al., 2018), which were created from Wikipedia,3 as well as from other content extracted from the web with Common Crawl.4 Overall, 97% of the words in our corpus exist in fastText …","url":["https://arxiv.org/pdf/1904.07953"]} |
| {"year":"2019","title":"Semantic similarity measure for Thai language","authors":["P Wongchaisuwat"],"snippet":"… In this paper, pre-trained word vectors from fastText [10] and Thai2vec [1] corpus are used to compute the similarity between given words. The facebook research distributed the word vector trained on a common crawl and Wikipedia using the fastText model …","url":["https://saki.siit.tu.ac.th/isai-nlp2018/uploads_final/5__a25c56af02784c266f98ef0378499ff1/iSAI-NLP2018_0005_final.pdf"]} |
| {"year":"2019","title":"Semantic Textual Similarity Measures for Case-Based Retrieval of Argument Graphs","authors":["M Lenz, S Ollinger, P Sahitaj, R Bergmann - International Conference on Case-Based …, 2019"],"snippet":"… Word2vec GoogleNews 3 vectors are trained on the Google News dataset on about 100B tokens. GloVe 4 is trained on the Common Crawl dataset on 840B tokens. fastText 5 vectors are trained on Wikipedia and Common Crawl …","url":["https://link.springer.com/chapter/10.1007/978-3-030-29249-2_15"]} |
| {"year":"2019","title":"Semi-supervised machine learning with word embedding for classification in price statistics","authors":["H Martindale, E Rowland, T Flower - 16th Meeting of the Ottawa Group on Price …, 2019"],"snippet":"Page 1. Office for National Statistics 1 Semi-supervised machine learning with word embedding for classification: April 2019 26/04/2019 Semi-supervised machine learning with word embedding for classification in price statistics …","url":["https://eventos.fgv.br/sites/eventos.fgv.br/files/arquivos/u161/semi-supervised_ml_for_price_stats-ottawa_group.pdf"]} |
| {"year":"2019","title":"Semi-supervised Neural Machine Translation via Marginal Distribution Estimation","authors":["Y Wang, Y Xia, L Zhao, J Bian, T Qin, E Chen, TY Liu - IEEE/ACM Transactions on …, 2019"],"snippet":"Page 1. 2329-9290 (c) 2019 IEEE. Personal use is permitted, but republication/ redistribution requires IEEE permission. See http://www.ieee.org/ publications_standards/publications/rights/index.html for more information. This …","url":["https://ieeexplore.ieee.org/abstract/document/8732422/"]} |
| {"year":"2019","title":"SENPAI: Supporting Exploratory Text Analysis through Semantic & Syntactic Pattern Inspection","authors":["M Samory, T Mitra - 2019"],"snippet":"… lemmatization, so as to remove surface form variations which do not alter the meaning of a word, eg the lemma for both “moved” and “moves” is “move.” Then, we encode lemmas with the corresponding 300-dimensional word …","url":["http://people.cs.vt.edu/tmitra/public/papers/icwsm19-SENPAI.pdf"]} |
| {"year":"2019","title":"Sense disambiguation for Punjabi language using supervised machine learning techniques","authors":["VP Singh, P Kumar - Sādhanā, 2019"],"snippet":"… The character n-grams of length 5 have been applied to words in window of size 5 with 10 negative samples [10]. It has been trained on the Punjabi Wikipedia and the raw web data fetched by common crawl method. 6 Working of WSD System for Punjabi language …","url":["https://link.springer.com/article/10.1007/s12046-019-1206-x"]} |
| {"year":"2019","title":"Sentence and Word Weighting for Neural Machine Translation Domain Adaptation","authors":["PP Chen"],"snippet":"Page 1. Sentence and Word Weighting for Neural Machine Translation Domain Adaptation Pinzhen (Patrick) Chen Undergraduate Dissertation Artificial Intelligence and Software Engineering School of Informatics The …","url":["https://project-archive.inf.ed.ac.uk/ug4/20191530/ug4_proj.pdf"]} |
| {"year":"2019","title":"Sentence Classification and Information Retrieval for Petroleum Engineering","authors":["TF Ferraz, GABA Ferreira, FG Cozman, I Santos"],"snippet":"… Accordingly, we used a word embedding representation in order to represent the words as vectors and then be able to define and compute distances between terms. We used a pre-trained embedding model called ”Common Crawl” [Pennington et al. 2014] …","url":["http://www.bracis2019.ufba.br/Camera_Ready/199118_1.pdf"]} |
| {"year":"2019","title":"Sentence Mover's Similarity: Automatic Evaluation for Multi-Sentence Texts","authors":["E Clark, A Celikyilmaz, NA Smith"],"snippet":"… We obtain GloVe embeddings, which are type-based, 300-dimensional embeddings trained on Common Crawl,9 using spaCy,10 while the ELMo em- beddings are character-based, 1,024-dimensional, contextual …","url":["https://homes.cs.washington.edu/~nasmith/papers/clark+celikyilmaz+smith.acl19.pdf"]} |
| {"year":"2019","title":"Sentence-Level Content Planning and Style Specification for Neural Text Generation","authors":["X Hua, L Wang - arXiv preprint arXiv:1909.00734, 2019"],"snippet":"… Statistics are shown in Table 1. Input Keyphrases and Label Construction. To obtain the input keyphrase candidates and their sentence-level selection labels, we first construct queries to retrieve passages from Wikipedia and news articles collected from commoncrawl …","url":["https://arxiv.org/pdf/1909.00734"]} |
| {"year":"2019","title":"Sentiment Analysis","authors":["D Sarkar - Text Analytics with Python, 2019"],"snippet":"In this chapter, we cover one of the most interesting and widely used aspects pertaining to natural language processing (NLP), text analytics, and machine learning. The problem at hand is sentiment...","url":["https://link.springer.com/chapter/10.1007/978-1-4842-4354-1_9"]} |
| {"year":"2019","title":"Separate Chaining Meets Compact Hashing","authors":["D Köppl - arXiv preprint arXiv:1905.00163, 2019"],"snippet":"Page 1. Separate Chaining Meets Compact Hashing Dominik Köppl Department of Informatics, Kyushu University, Japan Society for Promotion of Science Abstract While separate chaining is a common strategy for resolving …","url":["https://arxiv.org/pdf/1905.00163"]} |
| {"year":"2019","title":"Sequence Labeling to Detect Stuttering Events in Read Speech","authors":["S Alharbi, M Hasan, AJH Simons, S Brumfitt, P Green - Computer Speech & …, 2019"],"snippet":"… In the present study, we used a pre-trained GloVe model to generate word embeddings for each utterance. This model was trained on the Common Crawl (CC) corpus (1.9 M vocab) Pennington et al. (2014). 6. Automatic Speech Recognition System …","url":["https://www.sciencedirect.com/science/article/pii/S0885230819302967"]} |
| {"year":"2019","title":"Sequence Time Expression Recognition in the Spanish Clinical Narrative","authors":["A Ruiz-de-la-Cuadra, JL López-Cuadrado… - 2019 IEEE 32nd …, 2019"],"snippet":"… embedding (Table 1). Name Training Words Size Resource Glo200Ve Non-zero entries [37] 840 B 300 Common Crawl Spanish Billion Word [38] Word2Vec [39] 1.5 B 300 Sensem, Ancora Corpus, OPUS Project, etc. EVEX Word2Vec …","url":["https://ieeexplore.ieee.org/abstract/document/8787434/"]} |
| {"year":"2019","title":"Sequence-to-sequence Pre-training with Data Augmentation for Sentence Rewriting","authors":["Y Zhang, T Ge, F Wei, M Zhou, X Sun - arXiv preprint arXiv:1909.06002, 2019"],"snippet":"… Specifically, for a correct sentence, a back translation model trained with the public GEC data first generates 10 best outputs; then a 5-gram language model (JunczysDowmunt and Grundkiewicz, 2016) trained on Common …","url":["https://arxiv.org/pdf/1909.06002"]} |
| {"year":"2019","title":"Sequential Attention-based Network for Noetic End-to-End Response Selection","authors":["Q Chen, W Wang - arXiv preprint arXiv:1901.02609, 2019"],"snippet":"… Embedding Training corpus #Words glove.6B.300d Wikipedia + Gigaword 0.4M glove.840B.300d Common Crawl 2.2M glove.twitter.27B.200d Twitter 1.2M … 1.0M crawl-300d-2M.vec Common Crawl 2.0M word2vec.300d Linux manual pages 0.3M …","url":["https://arxiv.org/pdf/1901.02609"]} |
| {"year":"2019","title":"Sequential Matching Model for End-to-end Multi-turn Response Selection","authors":["Q Chen, W Wang - ICASSP 2019-2019 IEEE International Conference on …, 2019"],"snippet":"… Re- sults on the Ubuntu development set are shown in Table 3. We can see that word2vec embedding trained on the training dataset achieves better results than Fasttext [23] embedding trained on the unlabeled corpus …","url":["https://ieeexplore.ieee.org/abstract/document/8682538/"]} |
| {"year":"2019","title":"Sequential transfer learning in NLP for text summarization","authors":["P Fecht"],"snippet":"… With W and ˜W, the model generates two sets of word vectors which are supposed to perform equally if X is symmetric [64]. The GloVe model has been trained on varying sized datasets from one up to 42 billion (Common Crawl) tokens of data …","url":["https://www.inovex.de/fileadmin/files/Fachartikel_Publikationen/Theses/sequential-transfer-learning-in-nlp-for-text-summarization-pascal-fecht-2019.pdf"]} |
| {"year":"2019","title":"Should John Be More Likely A Physician Than Lisa: Bias-Performance Trade-Off for Gendered Pronoun Resolution","authors":["S Goel, J Li, H Zheng"],"snippet":"… the female gendered words. For our case, we are using the pre-trained Glove6 (these cotain 840B tokens and are trained on the Common Crawl corpus) embeddings to get the hard-debiased em- beddings. To obtain these …","url":["https://shivankgoel.github.io/notes/ds/Gendered_Pronoun_Resolution.pdf"]} |
| {"year":"2019","title":"Similarity Driven Approximation for Text Analytics","authors":["G Hu, Y Zhang, S Rigo, TD Nguyen - arXiv preprint arXiv:1910.07144, 2019"],"snippet":"… For example, the Google Books Ngram data set contains 2.2 TB of data [1], and the Common Crawl corpus contains petabytes of data [2]. Processing such large text data sets can be computationally expensive, especially if it involves sophisticated algorithms …","url":["https://arxiv.org/pdf/1910.07144"]} |
| {"year":"2019","title":"Situating Sentence Embedders with Nearest Neighbor Overlap","authors":["LH Lin, NA Smith - arXiv preprint arXiv:1909.10724, 2019"],"snippet":"… GloVe average 100 Wikipedia 2014 + Gigaword 5 (6B tokens, uncased) 300 Wikipedia 2014 + Gigaword 5 (6B tokens, uncased) 300 Common Crawl (840B tokens, cased) FastText average 300 Wikipedia + UMBC + statmt.org …","url":["https://arxiv.org/pdf/1909.10724"]} |
| {"year":"2019","title":"Six dimensions describe action understanding: the ACT-FASTaxonomy","authors":["MA Thornton, D Tamir, PS Hall - PsyArXiv. June, 2019"],"snippet":"… different algorithm. For the present purposes, we used a pre-trained version of GloVe based on the Common Crawl: a set of 840 billion tokens generated by scraping the entire web. For model comparison, we derived an …","url":["https://psyarxiv.com/gt6bw/download/?format=pdf"]} |
| {"year":"2019","title":"SMART: Robust and Efficient Fine-Tuning for Pre-trained Natural Language Models through Principled Regularized Optimization","authors":["H Jiang, P He, W Chen, X Liu, J Gao, T Zhao - arXiv preprint arXiv:1911.03437, 2019"],"snippet":"… For example, the well-known “Common Crawl project” is producing text data extracted from web pages at a rate of about 20TB per month. The resulting extremely large text corpus allows us to train extremely large neural network-based general language models …","url":["https://arxiv.org/pdf/1911.03437"]} |
| {"year":"2019","title":"Social Relation Extraction from Chatbot Conversations: A Shortest Dependency Path Approach","authors":["M Glas - SKILL 2019-Studierendenkonferenz Informatik, 2019"],"snippet":"… The dictionary used here, 5 https://github.com/zalandoresearch/flair 6 https://spacy.io/ 7 http://commoncrawl.org/ 8 https://catalog.ldc.upenn.edu/LDC2013T19 Page 8. 8 Markus Glas Fig. 3: Example of a dependency path within a sentence containing two entities …","url":["https://dl.gi.de/bitstream/handle/20.500.12116/28989/SKILL2019-01.pdf?sequence=1"]} |
| {"year":"2019","title":"Social Sensing for Improving the User Experience in Orienteering","authors":["F Persia, S Helmer, S Pugacs, G Pilato - 2019 IEEE 13th International Conference on …, 2019"],"snippet":"… ing. In particular, we have used the spaCy “en core web md” language model, which is an “English multi-task Convolutional Neural Network trained on OntoNotes [34], with GloVe [35] vectors trained on Common Crawl [36]” …","url":["https://ieeexplore.ieee.org/abstract/document/8665498/"]} |
| {"year":"2019","title":"SOK: A Comprehensive Reexamination of Phishing Research from the Security Perspective","authors":["A Das, S Baki, AE Aassal, R Verma, A Dunbar - arXiv preprint arXiv:1911.00953, 2019"],"snippet":"Page 1. REEXAMINING PHISHING RESEARCH 1 SOK: A Comprehensive Reexamination of Phishing Research from the Security Perspective Avisha Das, Shahryar Baki, Ayman El Aassal, Rakesh Verma, and Arthur Dunbar …","url":["https://arxiv.org/pdf/1911.00953"]} |
| {"year":"2019","title":"Sparse Victory–A Large Scale Systematic Comparison of Count-Based and Prediction-Based Vectorizers for Text Classification","authors":["R Chakraborty, K Arora, A Elhence"],"snippet":"… Corpus (100 billion words). For greater ease of comparison both the GloVe and fastText models have a dimension of 300 and have been trained on the Common Crawl Corpus (640 billion words). The ELMo embedding has …","url":["https://acl-bg.org/proceedings/2019/RANLP%202019/pdf/RANLP022.pdf"]} |
| {"year":"2019","title":"ST-Sem: A Multimodal Method for Points-of-Interest Classification Using Street-Level Imagery","authors":["SS Noorian, A Psyllidis, A Bozzon - International Conference on Web Engineering, 2019"],"snippet":"… representing each word as a bag of character n-grams. We use pre-trained word vectors for 2 languages (English and German), trained on Common Crawl and Wikipedia 6 . According to the detected language l, the corresponding pre …","url":["https://link.springer.com/chapter/10.1007/978-3-030-19274-7_3"]} |
| {"year":"2019","title":"STAR-GCN: Stacked and Reconstructed Graph Convolutional Networks for Recommender Systems","authors":["J Zhang, X Shi, S Zhao, I King - arXiv preprint arXiv:1905.13129, 2019"],"snippet":"… For movie features, we concatenate the title name, release year, and one-hot encoded genres. We process title names by averaging the off-the-shelf 300-dimensional GloVe CommonCrawl word vector [Pennington et al., 2014] of each word …","url":["https://arxiv.org/pdf/1905.13129"]} |
| {"year":"2019","title":"STD: An Automatic Evaluation Metric for Machine Translation Based on Word Embeddings","authors":["P Li, C Chen, W Zheng, Y Deng, F Ye, Z Zheng - IEEE/ACM Transactions on Audio …, 2019"],"snippet":"… H and M are their means respectively. The word embedding used in our STD implementation is the freely-available fastText word embedding1 [11], which has 2 million word vectors trained on Common Crawl (600B tokens) …","url":["https://ieeexplore.ieee.org/abstract/document/8736840/"]} |
| {"year":"2019","title":"Streaming Infrastructure and Natural Language Modeling with Application to Streaming Big Data","authors":["Y Du - 2019"],"snippet":"… In our research, we try to find an alternative resource to study such data. Common Crawl is a massive multi-petabyte dataset hosted by Amazon. It contains archived HTML web page data from 2008 to date. Common …","url":["https://tigerprints.clemson.edu/all_dissertations/2329/"]} |
| {"year":"2019","title":"Structured Two-Stream Attention Network for Video Question Answering","authors":["L Gao, P Zeng, J Song, YF Li, W Liu, T Mei, HT Shen - Proceedings of the AAAI …, 2019"],"snippet":"… consisting of M words, is first converted into a sequence Q = {qm}M m=1, where qm is a one-hot vector representing the word at position m. Next, we employ the word embedding GloVe (Pennington, Socher, and Manning …","url":["https://www.aaai.org/ojs/index.php/AAAI/article/view/4602/4480"]} |
| {"year":"2019","title":"Study of Tibetan Text Classification based on fastText","authors":["W Ma, H Yu, J Ma - 3rd International Conference on Computer Engineering …"],"snippet":"… Every single text in all data is a line, and the \\ is added at the beginning of each line. Pre-training data set: fastText publishes word vectors in 157 languages [13], which are trained on Common Crawl and Wikipedia using fastText …","url":["https://download.atlantis-press.com/article/125913150.pdf"]} |
| {"year":"2019","title":"SUBMISSION OF WRITTEN WORK","authors":["O ERSITY, F CO"],"snippet":"Page 1. IT U N IV ERSITY O F CO PEN H A G EN SUBMISSION OF WRITTEN WORK Class code: Name of course: Course manager: Course e-portfolio: Thesis or project title: Supervisor: Full Name: Birthdate (dd/mm-yyyy) …","url":["http://www.derczynski.com/itu/docs/Multilingual%20hate%20speech%20detection.pdf","https://www.derczynski.com/itu/docs/Multilingual%20hate%20speech%20detection.pdf"]} |
| {"year":"2019","title":"Subword-based Compact Reconstruction of Word Embeddings","authors":["S Sasaki, J Suzuki, K Inui - Proceedings of the 2019 Conference of the North …, 2019"],"snippet":"… or embedding vectors), especially those trained on a vast amount of text data, such as the Common Crawl (CC) cor … word embeddings trained from GloVe.840B and fastText.600B are available: https://github.com/losyer …","url":["https://www.aclweb.org/anthology/N19-1353"]} |
| {"year":"2019","title":"SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems","authors":["A Wang, Y Pruksachatkun, N Nangia, A Singh…"],"snippet":"… We also include a baseline where for each task we simply predict the majority class, as well as a bag-of-words baseline where each input is represented as an average of its tokens' GloVe word vectors (300-dimensional and trained …","url":["https://w4ngatang.github.io/static/papers/superglue.pdf"]} |
| {"year":"2019","title":"Supervised Multimodal Bitransformers for Classifying Images and Text","authors":["D Kiela, S Bhooshan, H Firooz, D Testuggine - arXiv preprint arXiv:1909.02950, 2019"],"snippet":"… We describe each of the baselines in more detail below. • Bag of words (Bow) We sum 300-dimensional GloVe embeddings (Pennington, Socher, and Manning 2014) (trained on Common Crawl) for all words in the text …","url":["https://arxiv.org/pdf/1909.02950"]} |
| {"year":"2019","title":"Supplementary Material for “Multi-task Learning of Hierarchical Vision-Language Representation”","authors":["DK Nguyen, T Okatani"],"snippet":"… Questions and captions were tokenized using Python Natural Language Toolkit (nltk) [2]. We used the vocabulary provided by the CommonCrawl-840B GloVe model for English word vectors [8], and set out-of-vocabulary words to unk …","url":["https://pdfs.semanticscholar.org/83a6/fd8eadd36c22bdac861bd2b20aba87968c3d.pdf"]} |
| {"year":"2019","title":"Survey on Publicly Available Sinhala Natural Language Processing Tools and Research","authors":["N de Silva - arXiv preprint arXiv:1906.02358, 2019"],"snippet":"… [21] further provided two monolingual corpora for Sinhala. Those were a 155k+ sentences of filtered Sinhala Wikipedia8 and 5178k+ sentences of Sinhala common crawl9. 2.2 Data Sets Specific data sets for Sinhala, as expected. is scarce …","url":["https://arxiv.org/pdf/1906.02358"]} |
| {"year":"2019","title":"Synchronous Bidirectional Neural Machine Translation","authors":["L Zhou, J Zhang, C Zong - Transactions of the Association for Computational …, 2019"],"snippet":"Create a new account. Email. Returning user. Can't sign in? Forgot your password? Enter your email address below and we will send you the reset instructions. Email. Cancel. If the address matches an existing account you will …","url":["https://www.mitpressjournals.org/doi/full/10.1162/tacl_a_00256"]} |
| {"year":"2019","title":"Syntactic dependencies correspond to word pairs with high mutual information","authors":["R Futrell, P Qian, E Gibson, E Fedorenko, IA Blank"],"snippet":"… 2.5 Dataset We use the Common Crawl corpus (Buck et al., 2014) of English web text … Entropy, 19:275–307. Buck, C., Heafield, K., and Van Ooyen, B. (2014). N-gram counts and language models from the common crawl. In …","url":["http://socsci.uci.edu/~rfutrell/papers/futrell2019syntactic.pdf"]} |
| {"year":"2019","title":"Syntactically Supervised Transformers for Faster Neural Machine Translation","authors":["N Akoury, K Krishna, M Iyyer - arXiv preprint arXiv:1906.02780, 2019"],"snippet":"… For English-German, we evaluate on WMT 2014 En↔De as well as IWSLT 2016 En→De, while for English-French we train on the Europarl / Common Crawl subset of the full WMT 2014 En→Fr data and evaluate over the full dev/test sets …","url":["https://arxiv.org/pdf/1906.02780"]} |
| {"year":"2019","title":"Syntax-aware Multilingual Semantic Role Labeling","authors":["S He, Z Li, H Zhao - arXiv preprint arXiv:1909.00310, 2019"],"snippet":"… The pre-trained word em- bedding is 100-dimensional GloVe vectors (Pennington et al., 2014) for English, 300-dimensional fastText vectors (Grave et al., 2018) trained on Common Crawl and Wikipedia for other languages …","url":["https://arxiv.org/pdf/1909.00310"]} |
| {"year":"2019","title":"Syntax-Aware Sentence Matching with Graph Convolutional Networks","authors":["Y Lei, Y Hu, X Wei, L Xing, Q Liu - International Conference on Knowledge Science …, 2019"],"snippet":"… 4.2 Experiment Setting. In order to compare with the baseline, we use the same setting as BiMPM. We initialize word embeddings in the word representation layer with the 300-dimensional GloVe word vectors …","url":["https://link.springer.com/chapter/10.1007/978-3-030-29563-9_31"]} |
| {"year":"2019","title":"System and method for chat community question answering","authors":["N Londhe, S Kannan, N Bojja - US Patent App. 16/272,142, 2019"],"snippet":"US20190260694A1 - System and method for chat community question answering - Google Patents. System and method for chat community question answering. Download PDF Info. Publication number US20190260694A1. US20190260694A1 …","url":["https://patentimages.storage.googleapis.com/0c/f5/b6/7687c26806b141/US20190260694A1.pdf"]} |
| {"year":"2019","title":"System and method for concise display of query results via thumbnails with indicative images and differentiating terms","authors":["TP O'hara - US Patent 10,459,999, 2019"],"snippet":"… grams). In the case of a meta-search engine without access to the underlying indexes, one approach is to use data from the Common Crawl to derive global n-gram counts for TF-IDF and language modeling filtering. Another …","url":["http://www.freepatentsonline.com/10459999.html"]} |
| {"year":"2019","title":"System for creating a reasoning graph and for ranking of its nodes","authors":["B Agapiev - US Patent App. 15/793,751, 2019"],"snippet":"… View, Calif. and Common Crawl Foundation of Beverly Hills, Calif.) are processed (20) to identify statements of causal relationships (22, 24), which are then analyzed to extract causes and associated effect pairs (26). These …","url":["https://patentimages.storage.googleapis.com/ca/d2/fd/8b3a7f8fa4ec15/US20190073420A1.pdf"]} |
| {"year":"2019","title":"TüBa-D/DP Stylebook","authors":["D de Kok, S Pütz - 2019"],"snippet":"… Table 1: Subcorpora of the TüBa-D/DP. Subcorpus Genre Sentences Tokens Europarl Parliamentary proceedings 2.2M 55M taz (1986-2009) Newspaper 29.9M 393.7M Wikipedia (2019) Encyclopedia 42.2M …","url":["https://sfb833-a3.github.io/tueba-ddp/stylebook/stylebook-r4.pdf"]} |
| {"year":"2019","title":"TabbyXL: Rule-Based Spreadsheet Data Extraction and Transformation","authors":["A Shigarov, V Khristyuk, A Mikhailov, V Paramonov - International Conference on …, 2019"],"snippet":"… a spreadsheet-like format. Barik et al. [2] extracted 0.25M unique spreadsheets from Common Crawl 1 archive. Chen and Cafarella [6] reported about 0.4M spreadsheets of ClueWeb09 Crawl 2 archive. Spreadsheets can be …","url":["https://link.springer.com/chapter/10.1007/978-3-030-30275-7_6"]} |
| {"year":"2019","title":"Tackling Graphical NLP problems with Graph Recurrent Networks","authors":["L Song - 2019"],"snippet":"Page 1. Tackling Graphical NLP problems with Graph Recurrent Networks by Linfeng Song Submitted in Partial Fulfillment of the Requirements for the Degree Doctor of Philosophy Supervised by Professor Daniel Gildea Department of Computer Science …","url":["https://www.cs.rochester.edu/~lsong10/papers/Linfeng_Song_PhD_thesis.pdf"]} |
| {"year":"2019","title":"TARGER: Neural Argument Mining at Your Fingertips","authors":["A Chernodub, O Oliynyk, P Heidenreich, A Bondarenko…"],"snippet":"… Our background collection for the retrieval of argumentative sentences is formed by the DepCC corpus (Panchenko et al., 2018), a linguistically pre-processed subset of the Common Crawl containing 14.3 … Building a …","url":["https://webis.de/downloads/publications/papers/bondarenko_2019b.pdf"]} |
| {"year":"2019","title":"Task definition, annotated dataset, and supervised natural language processing models for symptom extraction from unstructured clinical notes","authors":["JM Steinkamp, W Bala, A Sharma, JJ Kantrowitz - Journal of Biomedical Informatics, 2019"],"snippet":"… Our word embeddings consisted of 300-dimensional Global Vectors (GloVe) [35] trained on the web common crawl data set concatenated with 300-dimensional custom trained FastText [28] vectors trained on the entirety …","url":["https://www.sciencedirect.com/science/article/pii/S153204641930276X"]} |
| {"year":"2019","title":"Team EP at TAC 2018: Automating data extraction in systematic reviews of environmental agents","authors":["A Nowak, P Kunstman - arXiv preprint arXiv:1901.02081, 2019"],"snippet":"… The model architecture is shown in Figure 3. Embeddings layer: Each token is represented by 1452 dimensional vector, consisting of: • 300-dimensional GloVe (Pennington et al., 2014) embedding (cased, trained on 840B tokens from Common Crawl) …","url":["https://arxiv.org/pdf/1901.02081"]} |
| {"year":"2019","title":"Techniques for Inverted Index Compression","authors":["GE Pibiri, R Venturini - arXiv preprint arXiv:1908.10598, 2019"],"snippet":"Page 1. Techniques for Inverted Index Compression GIULIO ERMANNO PIBIRI, ISTI-CNR, Italy ROSSANO VENTURINI, University of Pisa, Italy The data structure at the core of large-scale search engines is the inverted index …","url":["https://arxiv.org/pdf/1908.10598"]} |
| {"year":"2019","title":"Tell me you can read me","authors":["CE SUM, T THEOR"],"snippet":"Page 55. Complying with the obligation of transparency imposes indeed on the data controller the prior obligation to determine–deliberately or not, consciously or not–who are the targeted data subjects, and what are they supposed to find intelligible and easily accessible …","url":["https://pdfs.semanticscholar.org/8c2a/8c105a49e59c457c68b8390b49694c4c4c20.pdf#page=55"]} |
| {"year":"2019","title":"Temporal Context-Aware Representation Learning for Question Routing","authors":["X Zhang, W Cheng, B Zong, Y Chen, J Xu, D Li…"],"snippet":"… The state-of-the-art document embedding model, InferSent [3], is applied to compute the similarity between questions. We use the pre-trained 300-dimensional word vectors from fastText[19], which is trained on Common Crawl containing 600B tokens …","url":["https://xuczhang.github.io/papers/wsdm20_tcqr.pdf"]} |
| {"year":"2019","title":"Temporally Grounding Language Queries in Videos by Contextual Boundary-aware Prediction","authors":["J Wang, L Ma, W Jiang - arXiv preprint arXiv:1909.05010, 2019"],"snippet":"… 2015) features are adopted for all compared methods. Each word from the query is represented by GloVe (Pennington, Socher, and Manning 2014) word embedding vectors pre-trained on Common Crawl. We set hidden neuron size of LSTM to 512 …","url":["https://arxiv.org/pdf/1909.05010"]} |
| {"year":"2019","title":"Text Classification Using SVM Enhanced by Multithreading and CUDA","authors":["S Chatterjee, PG Jose, D Datta - International Journal of Modern Education and …, 2019"],"snippet":"Page 1. IJ Modern Education and Computer Science, 2019, 1, 11-23 Published Online January 2019 in MECS (http://www.mecs-press.org/) DOI: 10.5815/ijmecs.2019.01.02 Copyright © 2019 MECS IJ Modern Education and Computer Science, 2019, 1, 11-23 …","url":["http://search.proquest.com/openview/ab6d5a2cbbb23e2cba642a09784b043e/1?pq-origsite=gscholar&cbl=2026674"]} |
| {"year":"2019","title":"Text Corpus for NLP","authors":["C Room"],"snippet":"… Sep 2019. Common Crawl publishes 240 TiB of uncompressed data from 2.55 billion web pages. Of these, 1 billion URLs were not present in previous crawls. Common Crawl started in 2008. In 2013, they moved from ARC to Web ARChive (WARC) file format …","url":["https://devopedia.org/text-corpus-for-nlp"]} |
| {"year":"2019","title":"TEXT QUALITY EVALUATION METHODS AND PROCESSES","authors":["AA Pala, A Kagoshima, M Tober - US Patent App. 15/863,408, 2019"],"snippet":"… In one possible implementation, the reference text 2000 can be parts, or the complete version, of Wikipedia, for a given language, or one or more books, or Common Crawl, or any other corpus that consists of human-written high quality text …","url":["http://www.freepatentsonline.com/y2019/0213247.html"]} |
| {"year":"2019","title":"The AFRL WMT19 Systems: Old Favorites and New Tricks","authors":["J Gwinnup, G Erdmann, T Anderson - Proceedings of the Fourth Conference on …, 2019"],"snippet":"… Corpus Total Retained CommonCrawl 723,256 655,069 newscommentary 290,866 264,089 Yandex 1,000,000 901,307 ParaCrawl 12,061,155 5,173,675 UN2016 11,365,709 9,871,406 Total Lines 25,440,968 16,865,546 …","url":["https://www.aclweb.org/anthology/W19-5318"]} |
| {"year":"2019","title":"The BEA-2019 Shared Task on Grammatical Error Correction","authors":["C Bryant, M Felice, ØE Andersen, T Briscoe - … Workshop on Innovative Use of NLP for …, 2019"],"snippet":"Page 1. Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 52–75 Florence, Italy, August 2, 2019. c 2019 Association for Computational Linguistics 52 The BEA-2019 …","url":["https://www.aclweb.org/anthology/W19-4406"]} |
| {"year":"2019","title":"The BLCU System in the BEA 2019 Shared Task","authors":["L Yang, C Wang - Proceedings of the Fourteenth Workshop on Innovative …, 2019"],"snippet":"… JunczysDowmunt and Grundkiewicz (2016); JunczysDowmunt et al. (2018) utilize the Common Crawl corpus to train the language model and pre-train part of the NMT model. Inspired by these studies, we also try to use a monolingual corpus for data augmentation …","url":["https://www.aclweb.org/anthology/W19-4421"]} |
| {"year":"2019","title":"The Geometry of Culture: Analyzing the Meanings of Class through Word Embeddings","authors":["AC Kozlowski, M Taddy, JA Evans - American Sociological Review, 2019"],"snippet":"We argue word embedding models are a useful tool for the study of culture using a historical analysis of shared understandings of social class as an empirical case. Word embeddings represent semant...","url":["https://journals.sagepub.com/doi/abs/10.1177/0003122419877135"]} |
| {"year":"2019","title":"The impact of individual audit partners on their clients' narrative disclosures","authors":["C Mauritz, M Nienhaus, C Oehler - 2019"],"snippet":"Page 1. The impact of individual audit partners on their clients' narrative disclosures ∗ Christoph Mauritz1, Martin Nienhaus2, and Christopher Oehler2 1University of Münster 2Goethe-University Frankfurt September 5, 2019 Abstract …","url":["http://www.geaba.de/wp-content/uploads/2019/09/Mauritz-Nienhaus-Oehler_2.pdf"]} |
| {"year":"2019","title":"The LAIX Systems in the BEA-2019 GEC Shared Task","authors":["R Li, C Wang, Y Zha, Y Yu, S Guo, Q Wang, Y Liu… - … on Innovative Use of NLP for …, 2019"],"snippet":"… Table 1 lists the data sets used in Restricted Track and Unrestricted Track, including FCE (Yannakoudakis et al., 2011), Lang-82 (Mizumoto et al., 2012), NUCLE (Ng et al., 2014), W&I+LOCNESS (Bryant et al., 2019) and Com …","url":["https://www.aclweb.org/anthology/W19-4416"]} |
| {"year":"2019","title":"The LIG system for the English-Czech Text Translation Task of IWSLT 2019","authors":["L Vial, B Lecouteux, D Schwab, H Le, L Besacier - arXiv preprint arXiv:1911.02898, 2019"],"snippet":"… C is a speech translation corpus of TED talks, similar to the test data of the task, and we added the News Commentary corpus, which consists of political and economic commentaries, be- cause it was the second smallest corpus …","url":["https://arxiv.org/pdf/1911.02898"]} |
| {"year":"2019","title":"The Linked Open Data cloud is more abstract, flatter and less linked than you may think!","authors":["L Asprino, W Beek, P Ciancarini, F van Harmelen… - arXiv preprint arXiv …, 2019"],"snippet":"… The two largest available crawls of LOD that are available today are WebDataCommons and LOD-a-lot. WebDataCommons5 [12] consists of ∼31B triples that have been extracted from the CommonCrawl datasets (November 2018 version) …","url":["https://arxiv.org/pdf/1906.08097"]} |
| {"year":"2019","title":"The NiuTrans Machine Translation Systems for WMT19","authors":["B Li, Y Li, C Xu, Y Lin, J Liu, H Liu, Z Wang, Y Zhang…"],"snippet":"… For EN↔RU, we used the following resource provided by WMT, including News Commentaryv14, ParaCrawl-v3, CommonCrawl and Yandex … corpus via random samplimng from 2M monolingual data selected by Xenc in the …","url":["http://nlplab.com/members/xiaotong_files/2019-wmt.pdf"]} |
| {"year":"2019","title":"The Quest to Automate Fact-checking","authors":["C Li"],"snippet":"… The model contains 300-dimensional vectors for 3 million words and phrases. https://code.google.com/archive/p/word2vec/ 2: Global Vectors for Word Representation using The Common Crawl corpus which contains …","url":["https://pdfs.semanticscholar.org/13e0/ef9f40c767060b510e2aa75740a3eda60ad4.pdf"]} |
| {"year":"2019","title":"The relationship between implicit intergroup attitudes and beliefs","authors":["B Kurdi, TC Mann, TES Charlesworth, MR Banaji - Proceedings of the National …, 2019"],"snippet":"Skip to main content. Submit; About: Editorial Board; PNAS Staff; FAQ; Rights and Permissions; Site Map. Contact; Journal Club; Subscribe: Subscription Rates; Subscriptions FAQ; Open Access; Recommend PNAS to Your …","url":["https://www.pnas.org/content/early/2019/02/26/1820240116.short"]} |
| {"year":"2019","title":"The RWTH Aachen University Machine Translation Systems for WMT 2019","authors":["J Rosendahl, C Herold, Y Kim, M Graça, W Wang… - Proceedings of the Fourth …, 2019"],"snippet":"… For De→En, we use data from CommonCrawl, Europarl, NewsCommentary and Rapid … (2017)), but without tied embedding weights, on the data from CommonCrawl, Europarl, NewsCommentary and Rapid ie about 6M sentence pairs …","url":["https://www.aclweb.org/anthology/W19-5338"]} |
| {"year":"2019","title":"The Semantic Web: Two Decades On","authors":["A Hogan"],"snippet":"Page 1. Semantic Web 0 (0) 1 1 IOS Press 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18 19 19 20 20 21 21 22 22 23 23 24 24 25 25 26 26 27 27 28 28 29 29 30 30 31 31 32 32 33 …","url":["http://www.semantic-web-journal.net/system/files/swj2303.pdf"]} |
| {"year":"2019","title":"The Source-Target Domain Mismatch Problem in Machine Translation","authors":["J Shen, PJ Chen, M Le, J He, J Gu, M Ott, M Auli… - arXiv preprint arXiv …, 2019"],"snippet":"… For Myanmar monolingual data, we use the language split Commoncrawl data from (Buck et al., 2014) which includes texts in various domains crawled from the web. We use the myanmar-tools2 library to classify and convert all Zawgyi text to Unicode …","url":["https://arxiv.org/pdf/1909.13151"]} |
| {"year":"2019","title":"The TALP-UPC Machine Translation Systems for WMT19 News Translation Task: Pivoting Techniques for Low Resource MT","authors":["N Casas, JAR Fonollosa, C Escolano, C Basta… - Proceedings of the Fourth …, 2019"],"snippet":"… 4.2 English-Russian The available parallel English-Russian corpora for the shared task included News Commentary v14, Wiki Titles v1, Common Crawl corpus, ParaCrawl v3, Yandex Corpus and the United Nations Parallel Corpus v1.0 (Ziemski et al., 2016) …","url":["https://www.aclweb.org/anthology/W19-5311"]} |
| {"year":"2019","title":"The Universitat d'Alacant submissions to the English-to-Kazakh news translation task at WMT 2019","authors":["VM Sánchez-Cartagena, JA Pérez-Ortiz…"],"snippet":"… 556 corpus lang. raw cleaned News Crawl kk 783k 783k Wiki dumps kk 1.7M 1.7M Common Crawl kk 10.9M 5.4M News Crawl en 200M 200M … The same filtering was applied to the monolingual Kazakh Common Crawl corpus …","url":["https://www.dlsi.ua.es/~fsanchez/pub/pdf/sanchez-cartagena19a.pdf"]} |
| {"year":"2019","title":"The University of Helsinki submissions to the WMT19 news translation task","authors":["A Talman, U Sulubacak, R Vázquez, Y Scherrer… - arXiv preprint arXiv …, 2019"],"snippet":"… removing all sentence pairs with a length difference ratio above a certain threshold: for CommonCrawl, ParaCrawl and Rapid we used a threshold of 3, for WikiTitles a threshold of 2, and for all other data sets a threshold of 9; …","url":["https://arxiv.org/pdf/1906.04040"]} |
| {"year":"2019","title":"The University of Sydney's Machine Translation System for WMT19","authors":["L Ding, D Tao - arXiv preprint arXiv:1907.00494, 2019"],"snippet":"… 3 Data Preparation We used all available parallel corpus 3 for Finnish→ English except the “Wiki Headlines” due to the large number of incomplete sentences, and for monolingual target side English data, we selected all …","url":["https://arxiv.org/pdf/1907.00494"]} |
| {"year":"2019","title":"The Web is missing an essential part of infrastructure: an Open Web Index","authors":["D Lewandowski - arXiv preprint arXiv:1903.03846, 2019"],"snippet":"… A search engine needs to keep its index current, meaning it needs to update at least a part of it every minute. This is an important requirement that is not being met by any of the current projects (like Common Crawl) …","url":["https://arxiv.org/pdf/1903.03846"]} |
| {"year":"2019","title":"TiFi: Taxonomy Induction for Fictional Domains [Extended version]","authors":["CX Chu, S Razniewski, G Weikum - arXiv preprint arXiv:1901.10263, 2019"],"snippet":"Page 1. TiFi: Taxonomy Induction for Fictional Domains [Extended version] ∗ Cuong Xuan Chu Max Planck Institute for Informatics Saarbrücken, Germany cxchu@mpi-inf. mpg.de Simon Razniewski Max Planck Institute for Informatics …","url":["https://arxiv.org/pdf/1901.10263"]} |
| {"year":"2019","title":"TLR at BSNLP2019: A Multilingual Named Entity Recognition System","authors":["JG Moreno, EL Pontes, M Coustaty, A Doucet - Proceedings of the 7th Workshop on …, 2019"],"snippet":"… in Figure 1. 3.1 FastText Embedding In this layer, we used pre-trained embeddings for each language trained on Common Crawl and Wikipedia using fastText (Bojanowski et al., 2017; Grave et al., 2018). These models were …","url":["https://www.aclweb.org/anthology/W19-3711"]} |
| {"year":"2019","title":"TMU Transformer System Using BERT for Re-ranking at BEA 2019 Grammatical Error Correction on Restricted Track","authors":["M Kaneko, K Hotate, S Katsumata, M Komachi - … Workshop on Innovative Use of NLP …, 2019"],"snippet":"… The 5-gram language model for re-ranking was trained on a subset of the Common Crawl corpus (Chollampatt and Ng, 2018a).5 We used a Python spell checker tool6 on the GEC model hy- pothesis sentences. 3.3 Evaluation …","url":["https://www.aclweb.org/anthology/W19-4422"]} |
| {"year":"2019","title":"Top-K Attention Mechanism for Complex Dialogue System","authors":["CU Shina, JW Chab - 2019"],"snippet":"… Then, the model submit the candidate with the highest value among the given candidates as the final correct an- swer. They randomly sampled one of the 99 negative samples to prevent bias during learning and used …","url":["http://workshop.colips.org/dstc7/papers/33.pdf"]} |
| {"year":"2019","title":"Toponym Identification in Epidemiology Articles--A Deep Learning Approach","authors":["MR Davari, L Kosseim, TD Bui - arXiv preprint arXiv:1904.11018, 2019"],"snippet":"… In order to measure the effect of such domain specific information, we experimented with 2 other pretrained word embedding models: Google News Word2vec [11], and a GloVe Model trained on Common Crawl [24] … Common Crawl GloVe 2.2M 300 29.84 …","url":["https://arxiv.org/pdf/1904.11018"]} |
| {"year":"2019","title":"Toward Automated Worldwide Monitoring of Network-Level Censorship","authors":["Z Weinberg - 2018"],"snippet":"Page 1. Toward Automated Worldwide Monitoring of Network-level Censorship Submitted in partial fulfillment of the requirements for the degree of Doctor of Philosophy in Electrical and Computer Engineering Zachary Weinberg BA Chemistry, Columbia University …","url":["http://search.proquest.com/openview/11a5908644ea63a6b01b3f0c4d23ce4e/1?pq-origsite=gscholar&cbl=18750&diss=y"]} |
| {"year":"2019","title":"Toward Gender-Inclusive Coreference Resolution","authors":["YT Cao, H Daumé III - arXiv preprint arXiv:1910.13913, 2019"],"snippet":"Page 1. Toward Gender-Inclusive Coreference Resolution YANG TRISTA CAO, University of Maryland HAL DAUMÉ III, Microsoft Research & University of Maryland ABSTRACT Correctly resolving textual mentions of people …","url":["https://arxiv.org/pdf/1910.13913"]} |
| {"year":"2019","title":"Towards a Global Perspective on Web Tracking","authors":["N Samarasinghe, M Mannan - Computers & Security, 2019"],"snippet":"… Schelter et al. Schelter and Kunegis (2016) performed a large scale analysis of third-party trackers using the Common Crawl 2012 corpus. The corpus may contain tracking information of residential as well as institutional users …","url":["https://www.sciencedirect.com/science/article/pii/S0167404818314007"]} |
| {"year":"2019","title":"Towards an Automated Extraction of ABAC Constraints from Natural Language Policies","authors":["M Alohaly, H Takabi, E Blanco - IFIP International Conference on ICT Systems …, 2019"],"snippet":"… model. To configure the model, we set one hyper-parameter value at a time. Our default settings: dropout = 0, decay rate = 0, number of BiLSTM cells (ie, layers) = 1, and GloVe (Common crawl) with 300 dimensions. To determine …","url":["https://link.springer.com/chapter/10.1007/978-3-030-22312-0_8"]} |
| {"year":"2019","title":"Towards an automated method to assess data portals in the deep web","authors":["AS Correa, RM de Souza, FSC da Silva - Government Information Quarterly, 2019"],"snippet":"Skip to main content Skip to article …","url":["https://www.sciencedirect.com/science/article/pii/S0740624X18305185"]} |
| {"year":"2019","title":"Towards Content Expiry Date Determination: Predicting Validity Periods of Sentences","authors":["A Almquist, A Jatowt2r0000"],"snippet":"… For this, we use Common Crawl dataset 16 which is a web dump composed of billions of websites with plain text versions available … For each sentence found in the Common Crawl dataset we identify DATE, TIME and DURATION …","url":["http://www.dl.kuis.kyoto-u.ac.jp/~adam/ecir19a.pdf"]} |
| {"year":"2019","title":"Towards Content Transfer through Grounded Text Generation","authors":["RE Dataset","S Prabhumoye, C Quirk, M Galley - arXiv preprint arXiv:1905.05293, 2019"],"snippet":"05/13/19 - Recent work in neural generation has attracted significant interest in controlling the form of text, such as style, persona, and p...","url":["https://arxiv.org/pdf/1905.05293","https://deepai.org/publication/towards-content-transfer-through-grounded-text-generation"]} |
| {"year":"2019","title":"Towards countering hate speech and personal attack in social media","authors":["P Charitidis, S Doropoulos, S Vologiannidis… - arXiv preprint arXiv …, 2019"],"snippet":"… each language. After conducting some preliminary experiments, the best pre-trained embedding choice for Greek and French language was using fastText embeddings [45], trained on Common Crawl and Wikipedia. For English …","url":["https://arxiv.org/pdf/1912.04106"]} |
| {"year":"2019","title":"Towards Functionally Similar Corpus Resources for Translation","authors":["M Kunilovskaya, S Sharoff"],"snippet":"… Secondly, we used lemmatised texts, with stop words filtered out (biLSTMlex in Table 1). For both scenarios we used pre-trained word embeddings of size 300, trained on the English Wikipedia and CommonCrawl data, using …","url":["http://corpus.leeds.ac.uk/serge/publications/2019-RANLP.pdf"]} |
| {"year":"2019","title":"Towards Multimodal Emotion Recognition in German Speech Events in Cars using Transfer Learning","authors":["D Cevher, S Zepf, R Klinger - arXiv preprint arXiv:1909.02764, 2019"],"snippet":"… We use a neural network with an embedding layer (frozen weights, pretrained on Common Crawl and Wikipedia (Grave et al., 2018)), a bidirectional LSTM (Schuster and Paliwal, 1997), and two dense layers followed by a soft max output layer …","url":["https://arxiv.org/pdf/1909.02764"]} |
| {"year":"2019","title":"Towards Multimodal Sarcasm Detection (An _Obviously_ Perfect Paper)","authors":["S Castro, D Hazarika, V Pérez-Rosas, R Zimmermann… - arXiv preprint arXiv …, 2019"],"snippet":"… 768. We also considered averaging Common Crawl pre-trained 300 dimensional GloVe word vectors (Pennington et al., 2014) for each token; however, it resulted in lower performance as compared to BERT-based features …","url":["https://arxiv.org/pdf/1906.01815"]} |
| {"year":"2019","title":"Towards Non-task-specific Distillation of BERT via Sentence Representation Approximation","authors":["B Wu, H Zhang, M Li, Z Wang, Q Feng, J Huang… - arXiv preprint arXiv …, 2020","HZ Bowen Wu, M Li, Z Wang, Q Feng, J Huang…"],"snippet":"… distillation. 4.3 Hyperparameters For the student model in our proposed distilling method, we employ the 300-dimension GloVe (840B Common Crawl version; Pennington et al., 2014) to initialize the word embeddings. The …","url":["https://arxiv.org/pdf/2004.03097","https://www.researchgate.net/profile/Bowen_Wu10/publication/337113946_Towards_Non-task-specific_Distillation_of_BERT_via_Sentence_Representation_Approximation/links/5dc5cffc4585151435f7df39/Towards-Non-task-specific-Distillation-of-BERT-via-Sentence-Representation-Approximation.pdf"]} |
| {"year":"2019","title":"Towards Robust Named Entity Recognition for Historic German","authors":["S Schweter, J Baiter - arXiv preprint arXiv:1906.07592, 2019"],"snippet":"… 69.59% Common Crawl 68.97% Wikipedia + Common Crawl 72.00% Wikipedia + Common Crawl + Character 74.50 … 69.62% Riedl and Padó (2018) (with transfer-learning) 74.33% ONB Wikipedia 75.80% CommonCrawl 78.70% Wikipedia + CommonCrawl 79.46 …","url":["https://arxiv.org/pdf/1906.07592"]} |
| {"year":"2019","title":"Towards semantic-rich word embeddings","authors":["G Beringer, M Jabłonski, P Januszewski, A Sobecki…"],"snippet":"… collected (III), for the our approach. We use a pretrained embedding model from spaCy - en_vectors_web_lg, which contains 300-dimensional word vectors trained on Common Crawl with GloVe2. We compare results on the …","url":["https://annals-csis.org/Volume_18/drp/pdf/120.pdf"]} |
| {"year":"2019","title":"Towards Unsupervised Grammatical Error Correction using Statistical Machine Translation with Synthetic Comparable Corpus","authors":["S Katsumata, M Komachi - arXiv preprint arXiv:1907.09724, 2019"],"snippet":"… makes up for the synthetic target data. To compare the fluency, the outputs of each best iter on JFLEG were evaluated with the perplexity based on the Common Crawl language model10. The perplexity of USMTforward in iter …","url":["https://arxiv.org/pdf/1907.09724"]} |
| {"year":"2019","title":"Tracking Naturalistic Linguistic Predictions with Deep Neural Language Models","authors":["M Heilbron, B Ehinger, P Hagoort, FP de Lange - arXiv preprint arXiv:1909.04400, 2019"],"snippet":"… Non-predictive controls We included two non-predictive and potentially confounding variables: first, frequency which we quantified as unigram surprise (−log p(w)) which was based on a word's lemma count in the CommonCrawl corpus, obtained via spaCy …","url":["https://arxiv.org/pdf/1909.04400"]} |
| {"year":"2019","title":"Transfer Learning across Languages from Someone Else's NMT Model","authors":["T Kocmi, O Bojar - arXiv preprint arXiv:1909.10955, 2019"],"snippet":"… WMT 2012 WMT 2018 English - French Commoncrawl, Europarl, Giga FREN, News commentary, UN corpus WMT 2013 WMT dis. 2015 … Based on our previous experiments, we ex- clude the noisiest corpus, ie web crawled ParaCrawl or Commoncrawl …","url":["https://arxiv.org/pdf/1909.10955"]} |
| {"year":"2019","title":"Transfer Learning from Transformers to Fake News Challenge Stance Detection (FNC-1) Task","authors":["V Slovikovskaya - arXiv preprint arXiv:1910.14353, 2019"],"snippet":"… 9XLNet is named after TransformerXL 10These corpora include (1) BOOK CORPUS [Zhu et al., 2015] plus English Wikipedia, the original data used to train BERT (16GB); (2) CC-NEWS, which authors collected from the English …","url":["https://arxiv.org/pdf/1910.14353"]} |
| {"year":"2019","title":"Transforma at SemEval-2019 Task 6: Offensive Language Analysis using Deep Learning Architecture","authors":["R Ong - arXiv preprint arXiv:1903.05280, 2019"],"snippet":"… This allows us to evaluate the increase in di- mensionality on the performance of our models 3. GloVe: Common Crawl (300d) - Trained on 42B tokens, 1.9M vocabulary of unique words … Table 7: T - GloVe Twitter, CC - GloVe Common Crawl …","url":["https://arxiv.org/pdf/1903.05280"]} |
| {"year":"2019","title":"transformers. zip: Compressing Transformers with Pruning and Quantization","authors":["R Cheong, R Daniel - 2019"],"snippet":"… 9: return M 4 Page 5. 4 Experiments 4.1 Dataset We train and evaluate on the WMT English - German translation task. Specifically, we train on all of Europarl, Common Crawl, and News Commentary, validate on the …","url":["https://pdfs.semanticscholar.org/fe82/735fe8ae2163a37aa2787eee0db8efc745b6.pdf"]} |
| {"year":"2019","title":"Translating Translationese: A Two-Step Approach to Unsupervised Machine Translation","authors":["N Pourdamghani, N Aldarrab, M Ghazvininejad…"],"snippet":"… For Arabic we use MultiUN (Tiedemann, 2012). For French we use CommonCrawl For German we use a mix of CommonCrawl (1.7M), and NewsCommentary (300K) … For Spanish we use CommonCrawl (1.8M), and Europarl (200K) …","url":["https://www.isi.edu/~jonmay/pubs/acl19.pdf"]} |
| {"year":"2019","title":"Tree Edit Distance Learning via Adaptive Symbol Embeddings","authors":["BPCGA Micheli, B Hammer"],"snippet":"Deep Learning Monitor. Paper Detail. Close This Page. Tree Edit Distance Learning via Adaptive Symbol Embeddings. 2018-06-18 13:54:45; Benjamin Paaßen, Claudio Gallicchio, Alessio Micheli, Barbara Hammer; 0. Abstract …","url":["https://deeplearn.org/arxiv/38595/tree-edit-distance-learning-via-adaptive-symbol-embeddings"]} |
| {"year":"2019","title":"TU Wien@ TREC Deep Learning'19--Simple Contextualization for Re-ranking","authors":["S Hofstätter, M Zlabinger, A Hanbury - arXiv preprint arXiv:1912.01385, 2019"],"snippet":"… For the full task we generated initial rankings with Anserini using BM25 and utilized the validation sets to tune the re-ranking 1https://github.com/microsoft/BlingFire 242B CommonCrawl lower-cased: https://nlp.stanford.edu/projects/glove …","url":["https://arxiv.org/pdf/1912.01385"]} |
| {"year":"2019","title":"Twitter Sentiment on Affordable Care Act using Score Embedding","authors":["M Farhadloo - arXiv preprint arXiv:1908.07061, 2019"],"snippet":"… The embeddings pre-trained on Common Crawl data were only available in dimension 300 and were trained on 840 billion tokens with vocabulary … of available unlabeled training data had an impact on the performance …","url":["https://arxiv.org/pdf/1908.07061"]} |
| {"year":"2019","title":"Two New Evaluation Datasets for Low-Resource Machine Translation: Nepali-English and Sinhala-English","authors":["F Guzmán, PJ Chen, M Ott, J Pino, G Lample, P Koehn… - arXiv preprint arXiv …, 2019"],"snippet":"… M monolingual Wikipedia (en) 67.8M 2.0B Common Crawl (ne) 3.6M 103.0M Wikipedia (ne) 92.3K 2.8M Sinhala–English … 5M monolingual Wikipedia (en) 67.8M 2.0B Common Crawl (si) 5.2M 110.3M Wikipedia (si) 155.9K 4.7M …","url":["https://arxiv.org/pdf/1902.01382"]} |
| {"year":"2019","title":"Type: Report Dissemination level: Public Due Date (in months): 24 (August 2019)","authors":["CSGOER Network"],"snippet":"Page 1. X Modal X Cultural X Lingual X Domain X Site Global OER Network Grant Agreement Number: 761758 Project Acronym: X5GON Project title: Cross Modal, Cross Cultural, Cross Lingual, Cross Domain, and Cross Site …","url":["https://www.x5gon.org/wp-content/uploads/2019/10/D5.2_afterJSTrev_26Aug19.pdf"]} |
| {"year":"2019","title":"UdS-DFKI Participation at WMT 2019: Low-Resource (en-gu) and Coreference-Aware (en-de) Systems","authors":["C España-Bonet, D Ruiter - Proceedings of the Fourth Conference on Machine …, 2019"],"snippet":"… proportions. Our base system uses CommonCrawl … x1 Parallel CommonCrawl 2,394,878 x1 x4 Europarl 1,775,445 x1 x4 NewsCommentary 328,059 x4 x16 Rapid 1,105,651 x1 x4 ParaCrawlFiltered 12,424,790 x0 x1 Table …","url":["https://www.aclweb.org/anthology/W19-5315"]} |
| {"year":"2019","title":"Understanding and Mitigating the Security Risks of Content Inclusion in Web Browsers","authors":["S Arshad - 2019"],"snippet":"… 47 5.1 Sample URL grouping. . . . . 73 5.2 Narrowing down the Common Crawl to the candidate set used in our analysis (from left to right) . . . . 79 5.3 Vulnerable pages and sites in the candidate set …","url":["http://search.proquest.com/openview/5a3bdc0060c7ad7004f26c77dae937c2/1?pq-origsite=gscholar&cbl=18750&diss=y"]} |
| {"year":"2019","title":"Uni-and Multimodal and Structured Representations for Modeling Frame Semantics","authors":["T Botschen - 2019"],"snippet":"Page 1. Uniand Multimodal and Structured Representations for Modeling Frame Semantics Vom Fachbereich Informatik der Technischen Universität Darmstadt genehmigte Dissertation zur Erlangung des akademischen Grades …","url":["http://tuprints.ulb.tu-darmstadt.de/8484/1/Dissertation_TeresaBotschen.pdf"]} |
| {"year":"2019","title":"Unified Visual-Semantic Embeddings: Bridging Vision and Language with Structured Meaning Representations","authors":["H Wu, J Mao, Y Zhang, Y Jiang, L Li, W Sun, WY Ma - arXiv preprint arXiv:1904.05521, 2019"],"snippet":"Page 1. Unified Visual-Semantic Embeddings: Bridging Vision and Language with Structured Meaning Representations Hao Wu1,3,4,6,∗,†, Jiayuan Mao5,6,∗,†, Yufeng Zhang2,6,†, Yuning Jiang6, Lei Li6, Weiwei Sun1,3,4, Wei-Ying Ma6 …","url":["https://arxiv.org/pdf/1904.05521"]} |
| {"year":"2019","title":"Unraveling the Search Space of Abusive Language in Wikipedia with Dynamic Lexicon Acquisition","authors":["WF Chen, K Al-Khatib, M Hagen, H Wachsmuth…"],"snippet":"… The hidden state is employed to predict the probability of 'not-attack' using a linear regression layer. We use 300-dimensional word embeddings (Pennington et al., 2014) pre-trained on the Common Crawl with 840 …","url":["https://webis.de/downloads/publications/papers/stein_2019z.pdf"]} |
| {"year":"2019","title":"Unsupervised Cross-lingual Representation Learning at Scale","authors":["A Conneau, K Khandelwal, N Goyal, V Chaudhary… - arXiv preprint arXiv …, 2019"],"snippet":"… As shown in Figure 1, the CommonCrawl Corpus that we collected has significantly more monolingual data than the previously used Wikipedia corpora. Figure 3 shows that for the same BERTBase architecture, all models …","url":["https://arxiv.org/pdf/1911.02116"]} |
| {"year":"2019","title":"Unsupervised Extraction of Partial Translations for Neural Machine Translation","authors":["B Marie, A Fujita - Proceedings of the 2019 Conference of the North …, 2019"],"snippet":"… We extracted monolingual data ourselves from the Common Crawl project8 for Bengali (5.3M lines) and Malay (4.6M lines … 8http://commoncrawl org/ 9https://fasttext.cc/ 10The extraction of 100k partial translations from …","url":["https://www.aclweb.org/anthology/N19-1384"]} |
| year2019titleUnsupervised Joint Training of Bilingual Word EmbeddingsauthorsB Marie, A Fujita - Proceedings of the 57th Conference of the Association …, 2019snippet… For en- id, we used English (100M lines) and Indonesian (77M lines) Common Crawl corpora.5 We then mapped the word embeddings into a BWE space using VECMAP,6 one of the best and most robust methods for unsupervised mapping (Glavas et al., 2019) …urlhttps://www.aclweb.org/anthology/P19-1312 |
| year2019titleUnsupervised Lemmatization as Embeddings-Based Word ClusteringauthorsR Rosa, Z Žabokrtský - arXiv preprint arXiv:1908.08528, 2019snippet… For the experiments reported in this paper, we use the pretrained word embedding dictionaries available from the FastText website.78 The word embeddings had been trained on Wikipedia9 and Common Crawl10 texts …urlhttps://arxiv.org/pdf/1908.08528 |
| year2019titleUnsupervised Question Answering by Cloze TranslationauthorsP Lewis, L Denoyer, S Riedel - arXiv preprint arXiv:1906.04980, 2019snippet… Question Corpus We mine questions from En- glish pages from a recent dump of common crawl using simple selection criteria:3 We select sen … 3http:// commoncrawl.org/ 4We also experimented with language model pretraining …urlhttps://arxiv.org/pdf/1906.04980 |
| year2019titleUpdating Pre-trained Word Vectors and Text Classifiers using Monolingual AlignmentauthorsP Bojanowski, O Celebi, T Mikolov, E Grave, A Joulin - arXiv preprint arXiv …, 2019snippet… Indeed, despite their size, large web data such as Common Crawl lack coverage for highly technical expert fields such as medicine or law … Training data. We take two subsets of the May 2017 dump of the Common Crawl …urlhttps://arxiv.org/pdf/1910.06241 |
| year2019titleUpdating verbal fluency analysis for the 21st century: Applications for psychiatryauthorsTB Holmlund, J Cheng, PW Foltz, AS Cohen, B Elvevåg - Psychiatry Research, 2019snippet… To base the analysis on a corpus with a wide variety of animal-word sources, we used a set of pre-trained word vectors calculated from approximately 42 billion tokens from the entire internet, courtesy of the Common Crawl project (Pennington et al., 2014) …urlhttps://www.sciencedirect.com/science/article/pii/S0165178118324181 |
| year2019titleUsing Local Knowledge Graph Construction to Scale Seq2Seq Models to Multi-Document InputsauthorsA Fan, C Gardent, C Braud, A Bordes - 2019snippet… WikiSum Second, we experiment on the WikiSum CommonCrawl (Liu et al., 2018b) summarization dataset4 with 1.5 million examples … denotes results from (Liu et al., 2018b) that use data scraped from unrestricted web search, not the static CommonCrawl version …urlhttps://hal.archives-ouvertes.fr/hal-02277063/document |
| year2019titleUsing logical form encodings for unsupervised linguistic transformation: Theory and applicationsauthorsT Gröndahl, N Asokan - arXiv preprint arXiv:1902.09381, 2019snippetPage 1. arXiv:1902.09381v1 [cs.CL] 25 Feb 2019 Using logical form encodings for unsupervised linguistic transformation: Theory and applications Tommi Gröndahl N. Asokan Abstract We present a novel method to architect …urlhttps://arxiv.org/pdf/1902.09381 |
| year2019titleUsing the Semantic Web as a source of training dataauthorsC Bizer, A Primpeli, R Peeters - Datenbank-Spektrum, 2019snippet… The Web Data Commons (WDC) project 4 monitors the adoption of schema.org annotations on the Web by analysing the Common Crawl 5 , a series of public web corpora each containing several billion HTML pages [12]. The …urlhttps://link.springer.com/article/10.1007/s13222-019-00313-y |
| year2019titleUsing Whole Document Context in Neural Machine TranslationauthorsV Macé, C Servan - arXiv preprint arXiv:1910.07481, 2019snippet… models are evaluated on the same standard corpora that have Page 3. Corpora #lines # EN # DE Common Crawl 2.2M 54M 50M Europarl V9† 1.8M 50M 48M News Comm. V14† 338K 8.2M 8.3M ParaCrawl V3 27.5M 569M …urlhttps://arxiv.org/pdf/1910.07481 |
| year2019titleVariational Auto-Decoder: Neural Generative Modeling from Partial DataauthorsA Zadeh, YC Lim, PP Liang, LP Morency - arXiv preprint arXiv:1903.00840, 2019snippet… CMU-MOSEI consists of 23,500 sentences and CMU-MOSI consists of 2199 sentences. For text modality, the datasets contain GloVe word embeddings (Pennington et al., 2014) trained on 840 billion tokens from the Common Crawl dataset …urlhttps://arxiv.org/pdf/1903.00840 |
| year2019titleVernon-fenwick at SemEval-2019 Task 4: Hyperpartisan News Detection using Lexical and Semantic FeaturesauthorsV Srivastava, A Gupta, D Prakash, SK Sahoo, RR Rohit… - Proceedings of the 13th …, 2019snippet… semantic space. We have used 300-dimensional Glove embeddings trained on Common Crawl data of 2.2 million words and 840 billion tokens. An ar- ticle was tokenized into sentences and further into words to obtain it's article representation …","url":["https://www.aclweb.org/anthology/S19-2189"]} |
| {"year":"2019","title":"Video Question Answering with Spatio-Temporal Reasoning","authors":["Y Jang, Y Song, CD Kim, Y Yu, Y Kim, G Kim - International Journal of Computer …, 2019"],"snippet":"Page 1. International Journal of Computer Vision https://doi.org/10.1007/s11263-01901189-x Video Question Answering with Spatio-Temporal Reasoning Yunseok Jang1 · Yale Song2 · Chris Dongjoo Kim1 · Youngjae Yu1 · Youngjin Kim1 · Gunhee Kim1 …","url":["https://link.springer.com/article/10.1007/s11263-019-01189-x"]} |
| {"year":"2019","title":"Vir is to Moderatus as Mulier is to Intemperans Lemma Embeddings for Latin","authors":["R Sprugnoli, M Passarotti, G Moretti"],"snippet":"… Both Facebook and the organizers of the CoNLL shared tasks on multilingual parsing have pre-computed and released word embeddings trained on Latin texts crawled from the web: the former using the fastText model on …","url":["https://www.researchgate.net/profile/Rachele_Sprugnoli/publication/336798734_Vir_is_to_Moderatus_as_Mulier_is_to_Intemperans_Lemma_Embeddings_for_Latin/links/5db2a47e92851c577ec259b4/Vir-is-to-Moderatus-as-Mulier-is-to-Intemperans-Lemma-Embeddings-for-Latin.pdf"]} |
| {"year":"2019","title":"Vision-based Page Rank Estimation with Graph Networks","authors":["TI Denk, S Güner"],"snippet":"… The Open PageRank initiative provides freely available data that was built on top of Common Crawl [do/19], which provides high quality crawl data of webp ages since 2013. Open PageRank uses the number of backlinks of …","url":["https://www.researchgate.net/profile/Timo_Denk/publication/334824445_Vision-based_Page_Rank_Estimation_with_Graph_Networks/links/5d429cb692851cd04696fd56/Vision-based-Page-Rank-Estimation-with-Graph-Networks.pdf"]} |
| {"year":"2019","title":"VizNet: Towards A Large-Scale Visualization Learning and Benchmarking Repository","authors":["K Hu, N Gaikwad, M Bakker, M Hulsebos, E Zgraggen…"],"snippet":"… Corpora The first category of corpora includes data tables harvested from the web. In particular, we use horizontal relational tables from the WebTables 2015 corpus [6], which extracts structured tables from the Common Crawl …","url":["https://hci.stanford.edu/~cagatay/projects/viznet/VizNet-CHI19-Submission.pdf"]} |
| {"year":"2019","title":"Wanca in Korp: Text corpora for underresourced Uralic languages","authors":["H Jauhiainen, T Jauhiainen, K Lindén - DATA AND HUMANITIES (RDHUM) 2019 …"],"snippet":"… In addition to conducting our own crawling, we also used the pre-crawled corpus distributed by the Common Crawl Foundation … 2 In addition to conducting our own crawling, we used the pre-crawled corpus distributed by the Common Crawl Foundation …","url":["https://researchportal.helsinki.fi/files/126205806/Proceedings_RDHum2019.pdf#page=23"]} |
| {"year":"2019","title":"WDC Product Data Corpus and Gold Standard for Large-Scale Product Matching-Version 2.0","authors":["R Peeters, A Primpeli, C Bizer"],"snippet":"… methods. The Web Data Commons project regularly extracts schema.org annotations from the Common Crawl, a large public web corpus. November 2017 version of the WDC schema.org data set contains 365 million offers …","url":["http://webdatacommons.org/largescaleproductcorpus/v2/"]} |
| {"year":"2019","title":"Weakly-Supervised Concept-based Adversarial Learning for Cross-lingual Word Embeddings","authors":["H Wang, J Henderson, P Merlo - arXiv preprint arXiv:1904.09446, 2019"],"snippet":"… (2018a)11, we use their pretrained CBOW embeddings of 300 dimensions. For English, Italian and German, the models are trained on the WacKy corpus. The Finnish model is trained from Common Crawl and the Spanish model is trained from WMT News Crawl …","url":["https://arxiv.org/pdf/1904.09446"]} |
| {"year":"2019","title":"Web Archive Analysis Using Hive and SparkSQL","authors":["X Wang, Z Xie - 2019 ACM/IEEE Joint Conference on Digital Libraries …, 2019"],"snippet":"… Keywords web archive, big data, distributed computation 1 Introduction Web preservation organizations such as Common Crawl or Internet Archive are common sources of web archive data … We use a data set from Common Crawl May 2018 collection …","url":["https://ieeexplore.ieee.org/abstract/document/8791112/"]} |
| {"year":"2019","title":"Web Engineering: 19th International Conference, ICWE 2019, Daejeon, South Korea, June 11–14, 2019, Proceedings","authors":["M Bakaev"],"snippet":"Page 1. Maxim Bakaev Flavius Frasincar In-Young Ko (Eds.) Web Engineering 19th International Conference, ICWE 2019 Daejeon, South Korea, June 11–14, 2019 Proceedings 123 Page 2. Lecture Notes in Computer Science …","url":["http://books.google.de/books?hl=en&lr=lang_en&id=5R6VDwAAQBAJ&oi=fnd&pg=PR5&dq=commoncrawl&ots=X57GCPV1TC&sig=41aU_I70hr0H-D_h9MbSG1Ruryc"]} |
| {"year":"2019","title":"Web table integration and profiling for knowledge base augmentation","authors":["O Lehmberg - 2019"],"snippet":"Page 1. Web Table Integration and Profiling for Knowledge Base Augmentation Inauguraldissertation zur Erlangung des akademischen Grades eines Doktors der Naturwissenschaften der Universität Mannheim …","url":["https://madoc.bib.uni-mannheim.de/52346/1/thesis.pdf"]} |
| {"year":"2019","title":"Web View: Measuring & Monitoring Representative Information on Websites","authors":["A Saverimoutou, B Mathieu, S Vaton - ICIN 2019-QOE-MANAGEMENT 2019, 2019"],"snippet":"… XRay [8] and AdFisher run automated personalization detection experiments and Common Crawl 7 uses an Apache Nutch based crawler … 4http://phantomjs. org/ 5https://www.seleniumhq.org/ 6https://github.com/ghostwords/chameleon …","url":["https://hal.archives-ouvertes.fr/hal-02072471/document"]} |
| {"year":"2019","title":"WebIsAGraph: A Very Large Hypernymy Graph from a Web Corpus","authors":["F Stefano, I Finocchi, SP Ponzetto, V Paola - Sixth Italian Conference on …, 2019","S Faralli, I Finocchi, SP Ponzetto, P Velardi - 2019"],"snippet":"… Abstract In this paper, we present WebIsAGraph, a very large hypernymy graph compiled from a dataset of is-a relationships ex- tracted from the CommonCrawl … This is because, due to their large size, source input corpora …","url":["https://iris.luiss.it/handle/11385/192535","https://www.researchgate.net/profile/Stefano_Faralli2/publication/336899588_WebIsAGraph_A_Very_Large_Hypernymy_Graph_from_a_Web_Corpus/links/5db9a6c24585151435d5b691/WebIsAGraph-A-Very-Large-Hypernymy-Graph-from-a-Web-Corpus.pdf"]} |
| {"year":"2019","title":"What a neural language model tells us about spatial relations","authors":["M Ghanimifard, S Dobnik - Proceedings of the Combined Workshop on Spatial …, 2019"],"snippet":"… Finally, we also use pre-trained GloVe embeddings on the Common Crawl (CC) dataset with 42B tokens4 … On multi-word test suite the P-vectors perform slightly better. On both test suites, GloVe trained on Common Crawl performs …","url":["https://www.aclweb.org/anthology/W19-1608"]} |
| {"year":"2019","title":"What are Links in Linked Open Data? A Characterization and Evaluation of Links between Knowledge Graphs on the Web","authors":["A Haller, JD Fernández, MR Kamdar, A Polleres - Working Papers on Information …, 2019"],"snippet":"Page 1. What are Links in Linked Open Data? A Characterization and Evaluation of Links between Knowledge Graphs on the Web Armin Haller, Javier D. Fernández, Maulik R. Kamdar, Axel Polleres Arbeitspapiere zum Tätigkeitsfeld …","url":["http://epub.wu.ac.at/7193/1/20191002ePub_LOD_link_analysis.pdf"]} |
| {"year":"2019","title":"What does Neural Bring? Analysing Improvements in Morphosyntactic Annotation and Lemmatisation of Slovenian, Croatian and Serbian","authors":["N Ljubešić, K Dobrovoljc - Proceedings of the 7th Workshop on Balto-Slavic …, 2019"],"snippet":"… neural morphosyntactic taggers, we also experiment with various embeddings, mostly (1) the original CoNLL 2017 word2vec (w2v) embeddings for Slovenian and Croatian (Ginter et al., 2017) (there are none available for …","url":["https://www.aclweb.org/anthology/W19-3704"]} |
| {"year":"2019","title":"Who Needs Words? Lexicon-Free Speech Recognition","authors":["T Likhomanenko, G Synnaeve, R Collobert - arXiv preprint arXiv:1904.04479, 2019"],"snippet":"… char GCNN-20B no 6.4 2.7 3.6 1.5 4https://github.com/facebookresearch/wav2letter 5Speaker adaptation; pronunciation lexicon 612k hours AM train set and common crawl LM 7Speaker adaptation; 3k acoustic states 8Data augmentation; n-gram LM …","url":["https://arxiv.org/pdf/1904.04479"]} |
| {"year":"2019","title":"WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia","authors":["H Schwenk, V Chaudhary, S Sun, H Gong, F Guzmán - arXiv preprint arXiv …, 2019"],"snippet":"… recall. In this work, we chose the global mining op- tion. This will allow us to scale the same ap- proach to other, potentially huge, corpora for which document-level alignments are not easily available, eg Common Crawl. An …","url":["https://arxiv.org/pdf/1907.05791"]} |
| {"year":"2019","title":"WINOGRANDE: An Adversarial Winograd Schema Challenge at Scale","authors":["K Sakaguchi, RL Bras, C Bhagavatula, Y Choi - arXiv preprint arXiv:1907.10641, 2019"],"snippet":"… Ensemble Neural LMs Trinh and Le (2018) is one of the first attempts to apply a neural language model which is pre-trained on a very large corpora (including LM-1-Billion, CommonCrawl, SQuAD, and Gutenberg Books). In …","url":["https://arxiv.org/pdf/1907.10641"]} |
| {"year":"2019","title":"Word Embedding Based Extension of Text Categorization Topic Taxonomies","authors":["T Eljasik-Swoboda, F Engel, M Kaufmann, M Hemmje"],"snippet":"… ArgumenText is a practical implementation of an AM engine (Stab et al., 2018). It employs a two-step mechanism in which a large collection of documents (http://commoncrawl.org/, in Stab et al.'s experiment with 683 …urlhttp://ceur-ws.org/Vol-2348/paper01.pdf |
| year2019titleWord Embedding Models for Query Expansion in Answer Passage RetrievalauthorsS MASTERsnippetPage 1. MASTER'S THESIS Word Embedding Models for Query Expansion in Answer Passage Retrieval NIRMAL ROY Page 2. Page 3. Word Embedding Models for Query Expansion in Answer Passage Retrieval THESIS submitted …","url":["https://pdfs.semanticscholar.org/f436/c49151fd8d00c59655a939bbbd552f1577c4.pdf"]} |
| {"year":"2019","title":"Word Embedding Visualization Via Dictionary Learning","authors":["J Zhang, Y Chen, B Cheung, BA Olshausen - arXiv preprint arXiv:1910.03833, 2019"],"snippet":"… similar. For simplicity, we show the results for the 300 dimensional GloVe word vectors[30] pretrained on CommonCrawl [2]. We shall discuss the difference across different embedding models at the end in this section. Once …","url":["https://arxiv.org/pdf/1910.03833"]} |
| {"year":"2019","title":"Word Embeddings (Also) Encode Human Personality Stereotypes","authors":["O Agarwal, F Durupınar, NI Badler, A Nenkova - … of the Eighth Joint Conference on …, 2019"],"snippet":"… or profession. We experimented with GloVe representations (Pennington et al., 2014) trained on Common crawl (6B tokens, 400K vocab, 300d) and symmetric pattern (SP) based representations (Schwartz et al., 2015). We …","url":["https://www.aclweb.org/anthology/S19-1023"]} |
| {"year":"2019","title":"Word Embeddings and Gender Stereotypes in Swedish and English","authors":["R Précenth - 2019"],"snippet":"Page 1. UUDM Project Report 2019:15 Examensarbete i matematik, 30 hp Handledare: David Sumpter Examinator: Denis Gaidashev Maj 2019 Department of Mathematics Uppsala University Word Embeddings and Gender Stereotypes in Swedish and English …","url":["https://uu.diva-portal.org/smash/get/diva2:1313459/FULLTEXT01.pdf"]} |
| {"year":"2019","title":"Word Embeddings for Fine-Grained Sentiment Analysis","authors":["D Bacon, R Dalal, MRD Kodandarama, MR Hari…"],"snippet":"… Lastly, we considered the word embedding sub-model. We used the GLoVe word vectoring [11] trained on Common Crawl [https://commoncrawl.org/] as implemented by spaCy [7]. This resulted in a vector-dimension of 300 for each word …","url":["https://divatekodand.github.io/files/word_embeddings.pdf"]} |
| {"year":"2019","title":"Word Embeddings for Sentiment Analysis: A Comprehensive Empirical Survey","authors":["E Çano, M Morisio - arXiv preprint arXiv:1902.00753, 2019"],"snippet":"… This bundle contains data of Common Crawl (http: //commoncrawl.org/), a nonprofit organization that builds and maintains free and public text sets by crawling the Web. CommonCrawl42 is a highly reduced version easier and faster to work with …","url":["https://arxiv.org/pdf/1902.00753"]} |
| {"year":"2019","title":"Word Embeddings for the Armenian Language: Intrinsic and Extrinsic Evaluation","authors":["K Avetisyan, T Ghukasyan - arXiv preprint arXiv:1906.03134, 2019"],"snippet":"… A year later, Facebook released another batch of fastText embeddings, trained on Common Crawl and Wikipedia [2]. Other publicly available embeddings include 4 … these embeddings were trained on Wikipedia and Common Crawl, using CBOW architecture with …","url":["https://arxiv.org/pdf/1906.03134"]} |
| {"year":"2019","title":"Word Embeddings in Low Resource Gujarati Language","authors":["I Joshi, P Koringa, S Mitra - 2019 International Conference on Document Analysis …, 2019"],"snippet":"… (2014) released GloVe models trained on Wikipedia, Gigaword and Common Crawl (840B tokens). A notable effort is the work of Al-Rfou et al … Word embeddings for Gujarati language were released as a part of …","url":["https://ieeexplore.ieee.org/abstract/document/8893052/"]} |
| {"year":"2019","title":"Word Similarity Datasets for Thai: Construction and Evaluation","authors":["P Netisopakul, G Wohlgenannt, A Pulich - arXiv preprint arXiv:1904.04307, 2019"],"snippet":"… The models are trained on Common Crawl and Wikipedia corpora using fastText [13], regarding settings they report the us- age of the CBOW algorithm, 300 dimensions, a window size of 5 and 10 negatives. The model is large and contains 2M vectors …","url":["https://arxiv.org/pdf/1904.04307"]} |
| {"year":"2019","title":"Word Usage Similarity Estimation with Sentence Representations and Automatic Substitutes","authors":["AG Soler, M Apidianaki, A Allauzen - arXiv preprint arXiv:1905.08377, 2019"],"snippet":"… al., 2014). We use 300-dimensional GloVe embeddings pre-trained on Common Crawl (840B tokens).5 The representation of a sentence is obtained by averaging the GloVe embeddings of the words in the sentence. SIF (Smooth …","url":["https://arxiv.org/pdf/1905.08377"]} |
| {"year":"2019","title":"Word-embedding data as an alternative to questionnaires for measuring the affective meaning of concepts","authors":["A van Loon, J Freese - 2019"],"snippet":"… Here we include information from both algorithms. The GloVe embeddings we use have been trained on text obtained from Wikipedia, Twitter, and Common Crawl. The Word2vec embeddings we use are trained on the Google News Corpus …","url":["https://osf.io/preprints/socarxiv/r7ewx/download"]} |
| {"year":"2019","title":"Word-Embeddings and Grammar Features to Detect Language Disorders in Alzheimer's Disease PatientsauthorsJS Guerrero-Cristancho, JC Vásquez-Correa… - TecnoLógicas, 2020snippet… occurrence in a document [13]. Said authors considered a pre-trained model with the Common Crawl dataset, whose vocabulary size exceeds the 2 million and contains 840 billion words. A logistic regression classifier and …urlhttps://revistas.itm.edu.co/index.php/tecnologicas/article/download/1387/1456 |
| year2019titleWTMED at MEDIQA 2019: A Hybrid Approach to Biomedical Natural Language InferenceauthorsZ Wu, Y Song, S Huang, Y Tian, F Xia - Proceedings of the 18th BioNLP Workshop …, 2019snippetPage 1. Proceedings of the BioNLP 2019 workshop, pages 415–426 Florence, Italy, August 1, 2019. c 2019 Association for Computational Linguistics 415 WTMED at MEDIQA 2019: A Hybrid Approach to Biomedical Natural Language Inference …urlhttps://www.aclweb.org/anthology/W19-5044 |
| year2019titleX-WikiRE: A Large, Multilingual Resource for Relation Extraction as Machine ComprehensionauthorsM Abdou, C Sas, R Aralikatte, I Augenstein, A Søgaard - arXiv preprint arXiv …, 2019snippet… All monolingual models' word embeddings were initialised using fastText embeddings trained on each language's Wikipedia and common crawl corpora,7 except for the comparison experiments described in sub-section …urlhttps://arxiv.org/pdf/1908.05111 |
| year2019titleXLNet: Generalized Autoregressive Pretraining for Language UnderstandingauthorsZ Yang, Z Dai, Y Yang, J Carbonell, R Salakhutdinov… - arXiv preprint arXiv …, 2019snippetPage 1. XLNet: Generalized Autoregressive Pretraining for Language Understanding Zhilin Yang∗1, Zihang Dai∗12, Yiming Yang1, Jaime Carbonell1, Ruslan Salakhutdinov1, Quoc V. Le2 1Carnegie Mellon University, 2Google …urlhttps://arxiv.org/pdf/1906.08237 |
| year2019titleYNU Wb at HASOC 2019: Ordered Neurons LSTM with Attention for Identifying Hate Speech and Offensive LanguageauthorsB Wang, SL Yunxia Ding, X Zhou - Proceedings of the 11th annual meeting of the …, 2019snippet… And the pre-training word vector we used is fastText, which is provided by Mikolov et al. [7]. It is a 2 million word vector trained using subword information on Common Crawl with 600B tokens, and its dimension is 300. 4.3 Result …urlhttp://ceur-ws.org/Vol-2517/T3-2.pdf |
| year2019titleYNUWB at SemEval-2019 Task 6: K-max pooling CNN with average meta-embedding for identifying offensive languageauthorsB Wang, X Zhou, X Zhang - Proceedings of the 13th International Workshop on …, 2019snippet… FastText is provided by Mikolov et al. (Mikolov et al., 2018), it is a 2 million word vector trained using subword information on Common Crawl with 600B tokens, and its dimension is 300. Glove is provided by Jeffrey Pennington et al …urlhttps://www.aclweb.org/anthology/S19-2143 |
| year2019titleZastosowania metody rzutu przypadkowego w głębokich sieciach neuronowychauthorsPI WójciksnippetPage 1. Akademia Górniczo-Hutnicza im. Stanisława Staszica w Krakowie Wydział Informatyki, Elektroniki i Telekomunikacji Katedra Informatyki Rozprawa doktorska Zastosowania metody rzutu przypadkowego w głębokich …urlhttp://www.doktoraty.iet.agh.edu.pl/_media/2018:pwojcik:phd.pdf |
| year2019titleZero-Resource Cross-Lingual Named Entity RecognitionauthorsMS Bari, S Joty, P Jwalapuram - arXiv preprint arXiv:1911.09812, 2019snippet… We use FastText embeddings (Grave et al. 2018), which are trained on Common Crawl and Wikipedia, and SGD with a gradient clipping of 5.0 to train the model. We found that the learning rate was crucial for training, and …urlhttps://arxiv.org/pdf/1911.09812 |
| year2019titleZero-Resource Neural Machine Translation with Monolingual Pivot DataauthorsA Currey, K Heafieldsnippet… We use all available parallel corpora for EN↔DE (Europarl v7, Common Crawl, and News Commentary v11) and for EN↔RU (Common Crawl, News Commentary v11, Yandex Corpus, and Wiki Headlines) to train the initial …urlhttps://kheafield.com/papers/edinburgh/pivot.pdf |
| year2019titleZero-shot Learning and Knowledge Transfer in Music Classification and TaggingauthorsJ Choi, J Lee, J Park, J Nam - arXiv preprint arXiv:1906.08615, 2019snippet… We utilized a pretrained GloVe model available online. It contains 19 million vocabularies with 300 dimensional embedding trained from documents in Common Crawl data. We then evaluated the model on MTAT and GTZAN …urlhttps://arxiv.org/pdf/1906.08615 |
| year2019titleZero-Shot Question Classification Using Synthetic SamplesauthorsH Fu, C Yuan, X Wang, Z Sang, S Hu, Y Shi - 2018 5th IEEE International Conference …, 2019snippet… The detailed data set is listed in Table 1. All experiments follow the principle of counterpart parameters. The Chinese and English word vectors are pre-trained using Glove respectively on Samsung and Common Crawl corpus. The word dimension is 300 …urlhttps://ieeexplore.ieee.org/abstract/document/8691209/ |
| year2019titleZero-Shot Semantic Segmentation via Variational MappingauthorsN Kato, T Yamasaki, K Aizawa - Proceedings of the IEEE International Conference on …, 2019snippet… Dataset Unseen classes PASCAL-50 aeroplane, bicycle, bird, boat, bottle PASCAL-51 bus, car, cat, chair, cow PASCAL-52 diningtable, dog, horse, motorbike, person PASCAL-53 potted plant, sheep, sofa, train, tv/monitor …urlhttp://openaccess.thecvf.com/content_ICCVW_2019/papers/MDALC/Kato_Zero-Shot_Semantic_Segmentation_via_Variational_Mapping_ICCVW_2019_paper.pdf |
| |